Commit 8314d6ee authored by Deshui Yu's avatar Deshui Yu Committed by fishyds
Browse files

Merge from dogfood branch to master

parent 98530fd2
BIN_PATH ?= /usr/bin BIN_PATH ?= ${HOME}/.local/bin
NODE_PATH ?= /usr/share INSTALL_PREFIX ?= ${HOME}/.local
EXAMPLE_PATH ?= /usr/share/nni/examples PIP_MODE ?= --user
EXAMPLES_PATH ?= ${HOME}/nni/examples
SRC_DIR := ${PWD} WHOAMI := $(shell whoami)
.PHONY: build install uninstall dev-install
.PHONY: build install uninstall YARN := $(INSTALL_PREFIX)/yarn/bin/yarn
build: build:
### Building NNI Manager ### ### Building NNI Manager ###
...@@ -21,50 +21,124 @@ build: ...@@ -21,50 +21,124 @@ build:
install: install:
mkdir -p $(NODE_PATH)/nni ifneq ('$(HOME)', '/root')
mkdir -p $(EXAMPLE_PATH) ifeq (${WHOAMI}, root)
### Sorry, sudo make install is not supported ###
exit 1
endif
endif
mkdir -p $(BIN_PATH)
mkdir -p $(INSTALL_PREFIX)/nni
mkdir -p $(EXAMPLES_PATH)
### Installing NNI Manager ### ### Installing NNI Manager ###
cp -rT src/nni_manager/dist $(NODE_PATH)/nni/nni_manager cp -rT src/nni_manager/dist $(INSTALL_PREFIX)/nni/nni_manager
cp -rT src/nni_manager/node_modules $(NODE_PATH)/nni/nni_manager/node_modules cp -rT src/nni_manager/node_modules $(INSTALL_PREFIX)/nni/nni_manager/node_modules
### Installing Web UI ### ### Installing Web UI ###
cp -rT src/webui/build $(NODE_PATH)/nni/webui cp -rT src/webui/build $(INSTALL_PREFIX)/nni/webui
ln -sf $(NODE_PATH)/nni/nni_manager/node_modules/serve/bin/serve.js $(BIN_PATH)/serve ln -sf $(INSTALL_PREFIX)/nni/nni_manager/node_modules/serve/bin/serve.js $(BIN_PATH)/serve
### Installing Python SDK dependencies ### ### Installing Python SDK dependencies ###
pip3 install -r src/sdk/pynni/requirements.txt pip3 install $(PIP_MODE) -r src/sdk/pynni/requirements.txt
### Installing Python SDK ### ### Installing Python SDK ###
cd src/sdk/pynni && python3 setup.py install cd src/sdk/pynni && python3 setup.py install $(PIP_MODE)
### Installing nnictl ### ### Installing nnictl ###
cd tools && python3 setup.py install cd tools && python3 setup.py install $(PIP_MODE)
echo '#!/bin/sh' > $(BIN_PATH)/nnimanager echo '#!/bin/sh' > $(BIN_PATH)/nnimanager
echo 'cd $(NODE_PATH)/nni/nni_manager && node main.js $$@' >> $(BIN_PATH)/nnimanager echo 'cd $(INSTALL_PREFIX)/nni/nni_manager && node main.js $$@' >> $(BIN_PATH)/nnimanager
chmod +x $(BIN_PATH)/nnimanager chmod +x $(BIN_PATH)/nnimanager
install -m 755 tools/nnictl $(BIN_PATH)/nnictl echo '#!/bin/sh' > $(BIN_PATH)/nnictl
echo 'NNI_MANAGER=$(BIN_PATH)/nnimanager WEB_UI_FOLDER=$(INSTALL_PREFIX)/nni/webui python3 -m nnicmd.nnictl $$@' >> $(BIN_PATH)/nnictl
chmod +x $(BIN_PATH)/nnictl
### Installing examples ### ### Installing examples ###
cp -rT examples $(EXAMPLE_PATH) cp -rT examples $(EXAMPLES_PATH)
pip-install:
ifneq ('$(HOME)', '/root')
ifeq (${WHOAMI}, root)
### Sorry, sudo make install is not supported ###
exit 1
endif
endif
### Prepare Node.js ###
wget https://nodejs.org/dist/v10.9.0/node-v10.9.0-linux-x64.tar.xz
tar xf node-v10.9.0-linux-x64.tar.xz
cp -rT node-v10.9.0-linux-x64 $(INSTALL_PREFIX)/node
### Prepare Yarn 1.9.4 ###
wget https://github.com/yarnpkg/yarn/releases/download/v1.9.4/yarn-v1.9.4.tar.gz
tar xf yarn-v1.9.4.tar.gz
cp -rT yarn-v1.9.4 $(INSTALL_PREFIX)/yarn
### Building NNI Manager ###
cd src/nni_manager && $(YARN) && $(YARN) build
### Building Web UI ###
cd src/webui && $(YARN) && $(YARN) build
mkdir -p $(BIN_PATH)
mkdir -p $(INSTALL_PREFIX)/nni
### Installing NNI Manager ###
cp -rT src/nni_manager/dist $(INSTALL_PREFIX)/nni/nni_manager
cp -rT src/nni_manager/node_modules $(INSTALL_PREFIX)/nni/nni_manager/node_modules
echo '#!/bin/sh' > $(BIN_PATH)/nnimanager
echo 'cd $(INSTALL_PREFIX)/nni/nni_manager && node main.js $$@' >> $(BIN_PATH)/nnimanager
chmod +x $(BIN_PATH)/nnimanager
### Installing Web UI ###
cp -rT src/webui/build $(INSTALL_PREFIX)/nni/webui
ln -sf $(INSTALL_PREFIX)/nni/nni_manager/node_modules/serve/bin/serve.js $(BIN_PATH)/serve
### Installing examples ###
cp -rT examples $(EXAMPLES_PATH)
dev-install: dev-install:
mkdir -p $(BIN_PATH)
mkdir -p $(INSTALL_PREFIX)/nni
### Installing NNI Manager ###
ln -sf $(INSTALL_PREFIX)/nni/nni_manager $(PWD)/src/nni_manager/dist
ln -sf $(INSTALL_PREFIX)/nni/nni_manager/node_modules $(PWD)/src/nni_manager/node_modules
### Installing Web UI ###
ln -sf $(INSTALL_PREFIX)/nni/webui $(PWD)/src/webui
ln -sf $(INSTALL_PREFIX)/nni/nni_manager/node_modules/serve/bin/serve.js $(BIN_PATH)/serve
### Installing Python SDK dependencies ### ### Installing Python SDK dependencies ###
pip3 install --user -r src/sdk/pynni/requirements.txt pip3 install $(PIP_MODE) -r src/sdk/pynni/requirements.txt
### Installing Python SDK ### ### Installing Python SDK ###
cd src/sdk/pynni && pip3 install --user -e . cd src/sdk/pynni && pip3 install $(PIP_MODE) -e .
### Installing nnictl ### ### Installing nnictl ###
cd tools && pip3 install --user -e . cd tools && pip3 install $(PIP_MODE) -e .
echo '#!/bin/sh' > $(BIN_PATH)/nnimanager
echo 'cd $(INSTALL_PREFIX)/nni/nni_manager && node main.js $$@' >> $(BIN_PATH)/nnimanager
chmod +x $(BIN_PATH)/nnimanager
echo '#!/bin/sh' > $(BIN_PATH)/nnictl
echo 'NNI_MANAGER=$(BIN_PATH)/nnimanager python3 -m nnicmd.nnictl $$@' >> $(BIN_PATH)/nnictl
chmod +x $(BIN_PATH)/nnictl
### Installing examples ###
ln -sf $(EXAMPLES_PATH) $(PWD)/examples
uninstall: uninstall:
-rm -r $(EXAMPLE_PATH)
-rm -r $(NODE_PATH)/nni
-pip3 uninstall -y nnictl
-pip3 uninstall -y nni -pip3 uninstall -y nni
-rm $(BIN_PATH)/nnictl -pip3 uninstall -y nnictl
-rm $(BIN_PATH)/nnimanager -rm -r $(INSTALL_PREFIX)/nni
-rm -r $(EXAMPLES_PATH)
-rm $(BIN_PATH)/serve -rm $(BIN_PATH)/serve
-rm $(BIN_PATH)/nnimanager
-rm $(BIN_PATH)/nnictl
# Introduction # Introduction
Neural Network Intelligence(NNI) is a light package for supporting hyper-parameter tuning or neural architecture search.
It could easily run in different environments, such as: local/remote machine/cloud.
And it offers a new annotation language for user to conveniently design search space.
Also user could write code using any language or any machine learning framework.
# Getting Started NNI (Neural Network Intelligence) is a toolkit to help users running automated machine learning experiments.
TODO: Guide users through getting your code up and running on their own system. In this section you can talk about: The tool dispatches and runs trial jobs that generated by tuning algorithms to search the best neural architecture and/or hyper-parameters at different environments (e.g. local, remote servers, Cloud).
1. Installation process
2. Software dependencies ```
3. Latest releases AutoML experiment Training Services
4. API references ┌────────┐ ┌────────────────────────┐ ┌────────────────┐
│ nnictl │ ─────> │ nni_manager │ │ Local Machine │
└────────┘ │ sdk/tuner │ └────────────────┘
│ hyperopt_tuner │
│ evolution_tuner │ trail jobs ┌────────────────┐
│ ... │ ────────> │ Remote Servers │
├────────────────────────┤ └────────────────┘
│ trial job source code │
│ sdk/annotation │ ┌────────────────┐
├────────────────────────┤ │ Yarn,K8s, │
│ nni_board │ │ ... │
└────────────────────────┘ └────────────────┘
```
## **Who should consider using NNI**
* You want to try different AutoML algorithms for your training code (model) at local
* You want to run AutoML trial jobs in different environments to speed up search (e.g. remote servers, Cloud)
* As a researcher and data scientist, you want to implement your own AutoML algorithms and compare with other algorithms
* As a ML platform owner, you want to support AutoML in your platform
# Getting Started with NNI
## **Installation**
Install through python pip
* requirements: python >= 3.5
```
pip3 install -v --user git+https://github.com/Microsoft/NeuralNetworkIntelligence.git
source ~/.bashrc
```
## **Quick start: run an experiment at local**
Requirements:
* with NNI installed on your machine.
Run the following command to create an experiment for [mnist]
```bash
nnictl create --config ~/nni/examples/trials/mnist-annotation/config.yaml
```
This command will start the experiment and WebUI. The WebUI endpoint will be shown in the output of this command (for example, `http://localhost:8080`). Open this URL using your browsers. You can analyze your experiment through WebUI, or open trials' tensorboard. Please refer to [here](docs/GetStarted.md) for the GetStarted tutorial.
# Build and Test
TODO: Describe and show how to build your code and run the tests.
# Contribute # Contribute
TODO: Explain how other users and developers can contribute to make your code better. NNI is designed as an automatic searching framework with high extensibility. NNI has a very clear modular design. Contributing more tuner/assessor algorithms, training services, SDKs are really welcome. Please refer to [here](docs/ToContribute.md) for how to contribute.
# Privacy Statement # Privacy Statement
The [Microsoft Enterprise and Developer Privacy Statement](https://privacy.microsoft.com/en-us/privacystatement) describes the privacy statement of this software. The [Microsoft Enterprise and Developer Privacy Statement](https://privacy.microsoft.com/en-us/privacystatement) describes the privacy statement of this software.
# Customized Tuner for Experts
*Tuner receive result from Trial as a matric to evaluate the performance of a specific parameters/architecture configure. And tuner send next hyper-parameter or architecture configure to Trial.*
So, if user want to implement a customized Tuner, she/he only need to:
1) Inherit a tuner of a base Tuner class
2) Implement receive_trial_result and generate_parameter function
3) Write a script to run Tuner
Here ia an example:
**1) Inherit a tuner of a base Tuner class**
```python
from nni.tuner import Tuner
class CustomizedTuner(Tuner):
def __init__(self, ...):
...
```
**2) Implement receive_trial_result and generate_parameter function**
```python
from nni.tuner import Tuner
class CustomizedTuner(Tuner):
def __init__(self, ...):
...
def receive_trial_result(self, parameter_id, parameters, reward):
'''
Record an observation of the objective function and Train
parameter_id: int
parameters: object created by 'generate_parameters()'
reward: object reported by trial
'''
# your code implements here.
...
def generate_parameters(self, parameter_id):
'''
Returns a set of trial (hyper-)parameters, as a serializable object
parameter_id: int
'''
# your code implements here.
return your_parameters
...
```
```receive_trial_result``` will receive ```the parameter_id, parameters, reward``` as parameters input. Also, Tuner will receive the ```reward``` object are exactly same reward that Trial send.
The ```your_parameters``` return from ```generate_parameters``` function, will be package as json object by NNI SDK. NNI SDK will unpack json object so the Trial will receive the exact same ```your_parameters``` from Tuner.
For example:
If the you implement the ```generate_parameters``` like this:
```python
def generate_parameters(self, parameter_id):
'''
Returns a set of trial (hyper-)parameters, as a serializable object
parameter_id: int
'''
# your code implements here.
return {"dropout": 0.3, "learning_rate": 0.4}
```
It's means your Tuner will always generate parameters ```{"dropout": 0.3, "learning_rate": 0.4}```. Then Trial will receive ```{"dropout": 0.3, "learning_rate": 0.4}``` this object will using ```nni.get_parameters()``` API from NNI SDK. After training of Trial, it will send result to Tuner by calling ```nni.report_final_result(0.93)```. Then ```receive_trial_result``` will function will receied these parameters like:
```
parameter_id = 82347
parameters = {"dropout": 0.3, "learning_rate": 0.4}
reward = 0.93
```
**3) Configure your customized tuner in experiment yaml config file**
NNI needs to locate your customized tuner class and instantiate the class, so you need to specify the location of the customized tuner class and pass literal values as parameters to the \_\_init__ constructor.
```yaml
tuner:
codeDir: /home/abc/mytuner
classFileName: my_customized_tuner.py
className: CustomizedTuner
# Any parameter need to pass to your tuner class __init__ constructor
# can be specified in this optional classArgs field, for example
classArgs:
arg1: value1
```
More detail example you could see:
> * [evolution-tuner](../src/sdk/pynni/nni/evolution_tuner)
> * [hyperopt-tuner](../src/sdk/pynni/nni/hyperopt_tuner)
> * [evolution-based-customized-tuner](../examples/tuners/ga_customer_tuner)
...@@ -18,17 +18,19 @@ trainingServicePlatform: local ...@@ -18,17 +18,19 @@ trainingServicePlatform: local
# choice: true, false # choice: true, false
useAnnotation: true useAnnotation: true
tuner: tuner:
tunerName: TPE builtinTunerName: TPE
optimizationMode: Maximize classArgs:
optimize_mode: maximize
assessor: assessor:
assessorName: Medianstop builtinAssessorName: Medianstop
optimizationMode: Maximize classArgs:
optimize_mode: maximize
trial: trial:
trialCommand: python mnist.py command: python mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-annotation codeDir: /usr/share/nni/examples/trials/mnist-annotation
trialGpuNum: 0 gpuNum: 0
``` ```
For our built-in assessors, you need to fill two fields: `assessorName` which chooses NNI provided assessors (refer to [here]() for built-in assessors), `optimizationMode` which includes Maximize and Minimize (you want to maximize or minimize your trial result). For our built-in assessors, you need to fill two fields: `builtinAssessorName` which chooses NNI provided assessors (refer to [here]() for built-in assessors), `optimize_mode` which includes maximize and minimize (you want to maximize or minimize your trial result).
## Using user customized Assessor ## Using user customized Assessor
You can also write your own assessor following the guidance [here](). For example, you wrote an assessor for `examples/trials/mnist-annotation`. You should prepare the yaml configure below: You can also write your own assessor following the guidance [here](). For example, you wrote an assessor for `examples/trials/mnist-annotation`. You should prepare the yaml configure below:
...@@ -46,15 +48,25 @@ trainingServicePlatform: local ...@@ -46,15 +48,25 @@ trainingServicePlatform: local
# choice: true, false # choice: true, false
useAnnotation: true useAnnotation: true
tuner: tuner:
tunerName: TPE # Possible values: TPE, Random, Anneal, Evolution
optimizationMode: Maximize builtinTunerName: TPE
classArgs:
optimize_mode: maximize
assessor: assessor:
assessorCommand: your_command # Your assessor code directory
assessorCodeDir: /path/of/your/asessor codeDir:
assessorGpuNum: 0 # Name of the file which contains your assessor class
classFileName:
# Your assessor class name, must be a subclass of nni.Assessor
className:
# Parameter names and literal values you want to pass to
# the __init__ constructor of your assessor class
classArgs:
arg1: value1
gpuNum: 0
trial: trial:
trialCommand: python mnist.py command: python mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-annotation codeDir: /usr/share/nni/examples/trials/mnist-annotation
trialGpuNum: 0 gpuNum: 0
``` ```
You only need to fill three field: `assessorCommand`, `assessorCodeDir` and `assessorGpuNum`. You need to fill: `codeDir`, `classFileName`, `className`, and pass parameters to \_\_init__ constructor through `classArgs` field if the \_\_init__ constructor of your assessor class has required parameters.
\ No newline at end of file
...@@ -19,14 +19,15 @@ searchSpacePath: ...@@ -19,14 +19,15 @@ searchSpacePath:
useAnnotation: useAnnotation:
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: builtinTunerName:
#choice: Maximize, Minimize classArgs:
optimizationMode: #choice: maximize, minimize
tunerGpuNum: optimize_mode:
gpuNum:
trial: trial:
trialCommand: command:
trialCodeDir: codeDir:
trialGpuNum: gpuNum:
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: - ip:
...@@ -48,20 +49,22 @@ searchSpacePath: ...@@ -48,20 +49,22 @@ searchSpacePath:
useAnnotation: useAnnotation:
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: builtinTunerName:
#choice: Maximize, Minimize classArgs:
optimizationMode: #choice: maximize, minimize
tunerGpuNum: optimize_mode:
gpuNum:
assessor: assessor:
#choice: Medianstop #choice: Medianstop
assessorName: builtinAssessorName:
#choice: Maximize, Minimize classArgs:
optimizationMode: #choice: maximize, minimize
assessorGpuNum: optimize_mode:
gpuNum:
trial: trial:
trialCommand: command:
trialCodeDir: codeDir:
trialGpuNum: gpuNum:
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: - ip:
...@@ -82,20 +85,22 @@ trainingServicePlatform: ...@@ -82,20 +85,22 @@ trainingServicePlatform:
useAnnotation: useAnnotation:
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: builtinTunerName:
#choice: Maximize, Minimize classArgs:
optimizationMode: #choice: maximize, minimize
tunerGpuNum: optimize_mode:
gpuNum:
assessor: assessor:
#choice: Medianstop #choice: Medianstop
assessorName: builtinAssessorName:
#choice: Maximize, Minimize classArgs:
optimizationMode: #choice: maximize, minimize
assessorGpuNum: optimize_mode:
gpuNum:
trial: trial:
trialCommand: command:
trialCodeDir: codeDir:
trialGpuNum: gpuNum:
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: - ip:
...@@ -108,11 +113,13 @@ machineList: ...@@ -108,11 +113,13 @@ machineList:
* Description * Description
__authorName__ is the name of the author who create the experiment. __authorName__ is the name of the author who create the experiment.
TBD: add default value
* __experimentName__ * __experimentName__
* Description * Description
__experimentName__ is the name of the experiment you created. __experimentName__ is the name of the experiment you created.
TBD: add default value
* __trialConcurrency__ * __trialConcurrency__
* Description * Description
...@@ -155,24 +162,30 @@ machineList: ...@@ -155,24 +162,30 @@ machineList:
* __tuner__ * __tuner__
* Description * Description
__tuner__ specifies the tuner algorithm you use to run an experiment, there are two kinds of ways to set tuner. One way is to use tuner provided by nni sdk, you just need to set __tunerName__ and __optimizationMode__. Another way is to use your own tuner file, and you need to set __tunerCommand__, __tunerCwd__. __tuner__ specifies the tuner algorithm you use to run an experiment, there are two kinds of ways to set tuner. One way is to use tuner provided by nni sdk, you just need to set __builtinTunerName__ and __classArgs__. Another way is to use your own tuner file, and you need to set __codeDirectory__, __classFileName__, __className__ and __classArgs__.
* __tunerName__ and __optimizationMode__ * __builtinTunerName__ and __classArgs__
* __tunerName__ * __builtinTunerName__
__tunerName__ specifies the name of system tuner you want to use, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__} __builtinTunerName__ specifies the name of system tuner you want to use, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__}
* __optimizationMode__ * __classArgs__
__optimizationMode__ specifies the optimization mode of tuner algorithm, including {__Maximize__, __Minimize__} __classArgs__ specifies the arguments of tuner algorithm
* __tunerCommand__ and __tunerCwd__ * __codeDir__, __classFileName__, __className__ and __classArgs__
* __tunerCommand__ * __codeDir__
__tunerCommand__ specifies the command you want to use to run your own tuner file, for example {__python3 mytuner.py__} __codeDir__ specifies the directory of tuner code.
* __tunerCwd__ * __classFileName__
__tunerCwd__ specifies the working directory of your own tuner file, which is the path of your own tuner file. __classFileName__ specifies the name of tuner file.
* __tunerGpuNum__ * __className__
__tunerGPUNum__ specifies the gpu number you want to use to run the tuner process. The value of this field should be a positive number. __className__ specifies the name of tuner class.
* __classArgs__
__classArgs__ specifies the arguments of tuner algorithm.
* __gpuNum__
__gpuNum__ specifies the gpu number you want to use to run the tuner process. The value of this field should be a positive number.
Note: you could only specify one way to set tuner, for example, you could set {tunerName, optimizationMode} or {tunerCommand, tunerCwd}, and you could not set them both. Note: you could only specify one way to set tuner, for example, you could set {tunerName, optimizationMode} or {tunerCommand, tunerCwd}, and you could not set them both.
...@@ -180,36 +193,42 @@ machineList: ...@@ -180,36 +193,42 @@ machineList:
* Description * Description
__assessor__ specifies the assessor algorithm you use to run experiment, there are two kinds of ways to set assessor. One way is to use assessor provided by nni sdk, you just need to set __assessorName__ and __optimizationMode__. Another way is to use your own assessor file, and you need to set __assessorCommand__, __assessorCwd__. __assessor__ specifies the assessor algorithm you use to run an experiment, there are two kinds of ways to set assessor. One way is to use assessor provided by nni sdk, you just need to set __builtinAssessorName__ and __classArgs__. Another way is to use your own tuner file, and you need to set __codeDirectory__, __classFileName__, __className__ and __classArgs__.
* __assessorName__ and __optimizationMode__ * __builtinAssessorName__ and __classArgs__
* __assessorName__ * __builtinAssessorName__
__builtinAssessorName__ specifies the name of system assessor you want to use, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__}
* __classArgs__
__assessorName__ specifies the name of system assessor you want to use, nni sdk provides one kind of assessor, which is {__Medianstop__}. __classArgs__ specifies the arguments of tuner algorithm
* __optimizationMode__ * __codeDir__, __classFileName__, __className__ and __classArgs__
* __codeDir__
__optimizationMode__ specifies the optimization mode of tuner algorithm, including {__Maximize__, __Minimize__} __codeDir__ specifies the directory of tuner code.
* __assessorCommand__ and __assessorCwd__ * __classFileName__
* __assessorCommand__
__assessorCommand__ specifies the command you want to use to run your own assessor file, for example {__python3 myassessor.py__} __classFileName__ specifies the name of tuner file.
* __assessorCwd__ * __className__
__assessorCwd__ specifies the working directory of your own assessor file, which is the path of your own assessor file. __className__ specifies the name of tuner class.
* __assessorGpuNum__ * __classArgs__
__assessorGPUNum__ specifies the gpu number you want to use to run the assessor process. The value of this field should be a positive number. __classArgs__ specifies the arguments of tuner algorithm.
* __gpuNum__
Note: you could only specify one way to set assessor, for example, you could set {assessorName, optimizationMode} or {assessorCommand, assessorCwd}, and you could not set them both.If you do not want to use assessor, you just need to leave assessor empty or remove assessor in your config file. __gpuNum__ specifies the gpu number you want to use to run the assessor process. The value of this field should be a positive number.
Note: you could only specify one way to set assessor, for example, you could set {assessorName, optimizationMode} or {assessorCommand, assessorCwd}, and you could not set them both.If you do not want to use assessor, you just need to leave assessor empty or remove assessor in your config file. Default value is 0.
* __trial__ * __trial__
* __trialCommand__ * __command__
__trialCommand__ specifies the command to run trial process. __command__ specifies the command to run trial process.
* __trialCodeDir__ * __codeDir__
__trialCodeDir__ specifies the directory of your own trial file. __codeDir__ specifies the directory of your own trial file.
* __trialGpuNum__ * __gpuNum__
__trialGpuNum__ specifies the num of gpu you want to use to run your trial process. __gpuNum__ specifies the num of gpu you want to use to run your trial process. Default value is 0.
* __machineList__ * __machineList__
__machineList__ should be set if you set __trainingServicePlatform__=remote, or it could be empty. __machineList__ should be set if you set __trainingServicePlatform__=remote, or it could be empty.
...@@ -228,6 +247,17 @@ machineList: ...@@ -228,6 +247,17 @@ machineList:
__passwd__ specifies the password of your account. __passwd__ specifies the password of your account.
* __sshKeyPath__
If you want to use ssh key to login remote machine, you could set __sshKeyPath__ in config file. __sshKeyPath__ is the path of ssh key file, which should be valid.
Note: if you set passwd and sshKeyPath simultaneously, nni will try passwd.
* __passphrase__
__passphrase__ is used to protect ssh key, which could be empty if you don't have passphrase.
## Examples ## Examples
* __local mode__ * __local mode__
...@@ -244,14 +274,15 @@ trainingServicePlatform: local ...@@ -244,14 +274,15 @@ trainingServicePlatform: local
useAnnotation: true useAnnotation: true
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
tunerGpuNum: 0 optimize_mode: maximize
gpuNum: 0
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /nni/mnist codeDir: /nni/mnist
trialGpuNum: 0 gpuNum: 0
``` ```
If you want to use assessor, you could add assessor configuration in your file. If you want to use assessor, you could add assessor configuration in your file.
...@@ -268,20 +299,22 @@ searchSpacePath: /nni/search_space.json ...@@ -268,20 +299,22 @@ searchSpacePath: /nni/search_space.json
useAnnotation: false useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
tunerGpuNum: 0 optimize_mode: maximize
gpuNum: 0
assessor: assessor:
#choice: Medianstop #choice: Medianstop
assessorName: Medianstop builtinAssessorName: Medianstop
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
assessorGpuNum: 0 optimize_mode: maximize
gpuNum: 0
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /nni/mnist codeDir: /nni/mnist
trialGpuNum: 0 gpuNum: 0
``` ```
Or you could specify your own tuner and assessor file as following: Or you could specify your own tuner and assessor file as following:
...@@ -297,17 +330,25 @@ searchSpacePath: /nni/search_space.json ...@@ -297,17 +330,25 @@ searchSpacePath: /nni/search_space.json
#choice: true, false #choice: true, false
useAnnotation: false useAnnotation: false
tuner: tuner:
tunerCommand: python3 mytuner.py codeDir: /nni/tuner
tunerCwd: /nni/tuner classFileName: mytuner.py
tunerGpuNum: 0 className: MyTuner
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
gpuNum: 0
assessor: assessor:
assessorCommand: python3 myassessor.py codeDir: /nni/assessor
assessorCwd: /nni/assessor classFileName: myassessor.py
assessorGpuNum: 0 className: MyAssessor
classArgs:
#choice: maximize, minimize
optimize_mode: maximize
gpuNum: 0
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /nni/mnist codeDir: /nni/mnist
trialGpuNum: 0 gpuNum: 0
``` ```
* __remote mode__ * __remote mode__
...@@ -326,14 +367,15 @@ searchSpacePath: /nni/search_space.json ...@@ -326,14 +367,15 @@ searchSpacePath: /nni/search_space.json
useAnnotation: false useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
tunerGpuNum: 0 optimize_mode: maximize
gpuNum: 0
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /nni/mnist codeDir: /nni/mnist
trialGpuNum: 0 gpuNum: 0
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: 10.10.10.10 - ip: 10.10.10.10
...@@ -347,5 +389,6 @@ machineList: ...@@ -347,5 +389,6 @@ machineList:
- ip: 10.10.10.12 - ip: 10.10.10.12
port: 22 port: 22
username: test username: test
passwd: test sshKeyPath: /nni/sshkey
passphrase: qwert
``` ```
\ No newline at end of file
**Getting Started with NNI** **Getting Started with NNI**
=== ===
NNI (Neural Network Intelligence) is a toolkit to help users run automated machine learning experiments.
The tool dispatches and runs trial jobs generated by tuning algorithms to search for the best neural architecture and/or hyper-parameters in different environments (e.g. local machine, remote servers, cloud).
``` ## **Installation**
AutoML experiment Training Services
┌────────┐ ┌────────────────────────┐ ┌────────────────┐
│ nnictl │ ─────> │ nni_manager │ │ Local Machine │
└────────┘ │ sdk/tuner │ └────────────────┘
│ hyperopt_tuner │
│ evlution_tuner │ trail jobs ┌────────────────┐
│ ... │ ────────> │ Remote Servers │
├────────────────────────┤ └────────────────┘
│ trail job source code │
│ sdk/annotation │ ┌────────────────┐
├────────────────────────┤ │ Yarn,K8s, │
│ nni_board │ │ ... │
└────────────────────────┘ └────────────────┘
```
## **Who should consider using NNI**
* You want to try different AutoML algorithms for your training code (model) locally
* You want to run AutoML trial jobs in different environments to speed up the search (e.g. remote servers, cloud)
* As a researcher or data scientist, you want to implement your own AutoML algorithms and compare them with other algorithms
* As an ML platform owner, you want to support AutoML in your platform
## **Setup**
* __Dependencies__ * __Dependencies__
nni requires:
```
python >= 3.5
node >= 10.9.0
yarn >= 1.9.4
```
Before installing NNI, please make sure your Python environment is set up correctly.
* __User installation__
* clone nni repository python >= 3.5
git clone https://github.com/Microsoft/NeuralNetworkIntelligence python pip should also be correctly installed. You could use "which pip" or "pip -V" to check in Linux.
TBD: For now, we don't support virtual environments.
* run install.sh * __Install NNI through pip__
cd NeuralNetworkIntelligence pip3 install -v --user git+https://github.com/Microsoft/NeuralNetworkIntelligence.git
sh ./install.sh source ~/.bashrc
For more details about installation, please refer to [Installation instructions](Installation.md). * __Install NNI through source code__
## **Quick start: run an experiment at local** git clone https://github.com/Microsoft/NeuralNetworkIntelligence
Requirements: cd NeuralNetworkIntelligence
* local environment setup [TODO]
source install.sh
Run the following command to create an experiment for [mnist]
```bash
nnictl create --config /usr/share/nni/examples/trials/mnist-annotation/config.yml
```
This command will start the experiment and WebUI. The WebUI endpoint will be shown in the output of this command (for example, `http://localhost:8080`). Open this URL using your browsers. You can analyze your experiment through WebUI, or open trials' tensorboard.
## **Quick start: run a customized experiment** ## **Quick start: run a customized experiment**
An experiment is to run multiple trial jobs, each trial job tries a configuration which includes a specific neural architecture (or model) and hyper-parameter values. To run an experiment through NNI, you should: An experiment is to run multiple trial jobs, each trial job tries a configuration which includes a specific neural architecture (or model) and hyper-parameter values. To run an experiment through NNI, you should:
...@@ -64,48 +30,54 @@ An experiment is to run multiple trial jobs, each trial job tries a configuratio ...@@ -64,48 +30,54 @@ An experiment is to run multiple trial jobs, each trial job tries a configuratio
* Provide a yaml experiment configure file * Provide a yaml experiment configure file
* (optional) Provide or choose an assessor * (optional) Provide or choose an assessor
**Prepare trial**: Let's use a simple trial example, e.g. mnist, provided by NNI. After you installed NNI, NNI examples have been put in /usr/share/nni/examples, run `ls /usr/share/nni/examples/trials` to see all the trial examples. You can simply execute the following command to run the NNI mnist example: **Prepare trial**: Let's use a simple trial example, e.g. mnist, provided by NNI. After you installed NNI, NNI examples have been put in ~/nni/examples, run `ls ~/nni/examples/trials` to see all the trial examples. You can simply execute the following command to run the NNI mnist example:
python /usr/share/nni/examples/trials/mnist-annotation/mnist.py python ~/nni/examples/trials/mnist-annotation/mnist.py
This command will be filled in the yaml configure file below. Please refer to [here]() for how to write your own trial. This command will be filled in the yaml configure file below. Please refer to [here]() for how to write your own trial.
**Prepare tuner**: NNI supports several popular automl algorithms, including Random Search, Tree of Parzen Estimators (TPE), Bayesian Optimization etc. Users can write their own tuner (refer to [here]()), but for simplicity, here we can choose a tuner provided by NNI as below: **Prepare tuner**: NNI supports several popular automl algorithms, including Random Search, Tree of Parzen Estimators (TPE), Evolution algorithm etc. Users can write their own tuner (refer to [here]()), but for simplicity, here we choose a tuner provided by NNI as below:
tunerName: TPE tunerName: TPE
optimizationMode: maximize optimizationMode: maximize
*tunerName* is used to specify a tuner in NNI, *optimizationMode* is to indicate whether you want to maximize or minimize your trial's result. *tunerName* is used to specify a tuner in NNI, *optimizationMode* is to indicate whether you want to maximize or minimize your trial's result.
**Prepare configure file**: Since you have already known which trial code you are going to run and which tuner you are going to use, it is time to prepare the yaml configure file. NNI provides a demo configure file for each trial example, `cat /usr/share/nni/examples/trials/mnist-annotation/config.yml` to see it. Its content is basically shown below: **Prepare configure file**: Since you have already known which trial code you are going to run and which tuner you are going to use, it is time to prepare the yaml configure file. NNI provides a demo configure file for each trial example, `cat ~/nni/examples/trials/mnist-annotation/config.yml` to see it. Its content is basically shown below:
``` ```
authorName: your_name authorName: your_name
experimentName: auto_mnist experimentName: auto_mnist
# how many trials could be concurrently running # how many trials could be concurrently running
trialConcurrency: 2 trialConcurrency: 2
# maximum experiment running duration # maximum experiment running duration
maxExecDuration: 3h maxExecDuration: 3h
# empty means never stop # empty means never stop
maxTrialNum: 100 maxTrialNum: 100
# choice: local, remote # choice: local, remote
trainingServicePlatform: local trainingServicePlatform: local
# choice: true, false # choice: true, false
useAnnotation: true useAnnotation: true
tuner: tuner:
tunerName: TPE builtinTunerName: TPE
optimizationMode: Maximize classArgs:
optimize_mode: maximize
trial: trial:
trialCommand: python mnist.py command: python mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-annotation codeDir: ~/nni/examples/trials/mnist-annotation
trialGpuNum: 0 gpuNum: 0
``` ```
Here *useAnnotation* is true because this trial example uses our python annotation (refer to [here]() for details). For trial, we should provide *trialCommand* which is the command to run the trial, provide *trialCodeDir* where the trial code is. The command will be executed in this directory. We should also provide how many GPUs a trial requires. Here *useAnnotation* is true because this trial example uses our python annotation (refer to [here]() for details). For trial, we should provide *trialCommand* which is the command to run the trial, provide *trialCodeDir* where the trial code is. The command will be executed in this directory. We should also provide how many GPUs a trial requires.
With all these steps done, we can run the experiment with the following command: With all these steps done, we can run the experiment with the following command:
nnictl create --config /usr/share/nni/examples/trials/mnist-annotation/config.yml nnictl create --config ~/nni/examples/trials/mnist-annotation/config.yml
You can refer to [here](NNICTLDOC.md) for more usage guide of *nnictl* command line tool. You can refer to [here](NNICTLDOC.md) for more usage guide of *nnictl* command line tool.
...@@ -118,11 +90,8 @@ The experiment has been running now, NNI provides WebUI for you to view experime ...@@ -118,11 +90,8 @@ The experiment has been running now, NNI provides WebUI for you to view experime
* [Tuners supported by NNI.](../src/sdk/pynni/nni/README.md) * [Tuners supported by NNI.](../src/sdk/pynni/nni/README.md)
* [How to enable early stop (i.e. assessor) in an experiment?](EnableAssessor.md) * [How to enable early stop (i.e. assessor) in an experiment?](EnableAssessor.md)
* [How to run an experiment on multiple machines?](RemoteMachineMode.md) * [How to run an experiment on multiple machines?](RemoteMachineMode.md)
* [How to write a customized tuner?](../examples/tuners/README.md) * [How to write a customized tuner?](CustomizedTuner.md)
* [How to write a customized assessor?](../examples/assessors/README.md) * [How to write a customized assessor?](../examples/assessors/README.md)
* [How to resume an experiment?]() * [How to resume an experiment?](NNICTLDOC.md)
* [Tutorial of the command tool *nnictl*.](NNICTLDOC.md) * [Tutorial of the command tool *nnictl*.](NNICTLDOC.md)
* [How to use *nnictl* to control multiple experiments?]() * [How to use *nnictl* to control multiple experiments?]()
## How to contribute
TBD
...@@ -8,12 +8,10 @@ nnictl support commands: ...@@ -8,12 +8,10 @@ nnictl support commands:
``` ```
nnictl create nnictl create
nnictl stop nnictl stop
nnictl create
nnictl update nnictl update
nnictl resume nnictl resume
nnictl trial nnictl trial
nnictl webui nnictl webui
nnictl rest
nnictl experiment nnictl experiment
nnictl config nnictl config
nnictl log nnictl log
...@@ -72,7 +70,7 @@ nnictl log ...@@ -72,7 +70,7 @@ nnictl log
* __nnictl update searchspace__ * __nnictl update searchspace__
* Description * Description
You can use this command to update an experiment's searchspace. You can use this command to update an experiment's search space.
* Usage * Usage
...@@ -201,14 +199,6 @@ nnictl log ...@@ -201,14 +199,6 @@ nnictl log
nnictl config show nnictl config show
### Manage restful server
* __nnictl rest check__
* Description
Check the status of restful server
* Usage
nnictl rest check
### Manage log ### Manage log
* __nnictl log stdout__ * __nnictl log stdout__
......
# Release 0.1.0 - 9/15/2018
Initial release of Neural Network Intelligence (NNI).
## Major Features
* Installation and Deployment
* Support pip install and source codes install
* Support training services on local mode(including Multi-GPU mode) as well as multi-machines mode
* Tuners, Assessors and Trial
* Support AutoML algorithms including: hyperopt_tpe, hyperopt_annealing, hyperopt_random, and evolution_tuner
* Support assessor(early stop) algorithms including: medianstop algorithm
* Provide Python API for user-defined tuners and assessors
* Provide Python API for user to wrap trial code as NNI deployable codes
* Experiments
* Provide a command line toolkit 'nnictl' for experiments management
* Provide a web UI for viewing experiments details and managing experiments
* Continuous Integration
* Support CI by providing out-of-box integration with [travis-ci](https://github.com/travis-ci) on ubuntu
* Others
* Support simple GPU job scheduling
\ No newline at end of file
...@@ -10,6 +10,18 @@ NNI supports running an experiment on multiple machines, called remote machine m ...@@ -10,6 +10,18 @@ NNI supports running an experiment on multiple machines, called remote machine m
## Setup environment ## Setup environment
Install NNI on each of your machines following the install guide [here](GetStarted.md). Install NNI on each of your machines following the install guide [here](GetStarted.md).
For remote machines that are used only to run trials but not the nnictl, you can just install python SDK:
* __Install python SDK through pip__
pip3 install --user git+https://github.com/Microsoft/NeuralNetworkIntelligence.git#subdirectory=src/sdk/pynni
* __Install python SDK through source code__
git clone https://github.com/Microsoft/NeuralNetworkIntelligence
cd src/sdk/pynni
python3 setup.py install
## Run an experiment ## Run an experiment
Still using `examples/trials/mnist-annotation` as an example here. The yaml file you need is shown below: Still using `examples/trials/mnist-annotation` as an example here. The yaml file you need is shown below:
``` ```
...@@ -26,12 +38,13 @@ trainingServicePlatform: local ...@@ -26,12 +38,13 @@ trainingServicePlatform: local
# choice: true, false # choice: true, false
useAnnotation: true useAnnotation: true
tuner: tuner:
tunerName: TPE builtinTunerName: TPE
optimizationMode: Maximize classArgs:
optimize_mode: maximize
trial: trial:
trialCommand: python mnist.py command: python mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-annotation codeDir: /usr/share/nni/examples/trials/mnist-annotation
trialGpuNum: 0 gpuNum: 0
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: 10.1.1.1 - ip: 10.1.1.1
......
## How to define search space?
### Hyper-parameter Search Space
* A search space configure example as follow:
```python
{
"dropout_rate":{"_type":"uniform","_value":[0.1,0.5]},
"conv_size":{"_type":"choice","_value":[2,3,5,7]},
"hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
"batch_size":{"_type":"choice","_value":[50, 250, 500]},
"learning_rate":{"_type":"uniform","_value":[0.0001, 0.1]}
}
```
The example defines ```dropout_rate``` as a variable whose prior distribution is uniform, with values ranging from ```0.1``` to ```0.5```.
The tuner will sample parameters/architecture by understanding the search space first.
Users should define the name, type and candidate values of each variable.
The candidate types and values for variables are listed here:
* {"_type":"choice","_value":options}
* Which means the variable value is one of the options, which should be a list. The elements of options can themselves be [nested] stochastic expressions. In this case, the stochastic choices that only appear in some of the options become conditional parameters.
<br/>
* {"_type":"randint","_value":[upper]}
* Which means the variable value is a random integer in the range [0, upper). The semantics of this distribution is that there is no more correlation in the loss function between nearby integer values, as compared with more distant integer values. This is an appropriate distribution for describing random seeds for example. If the loss function is probably more correlated for nearby integer values, then you should probably use one of the "quantized" continuous distributions, such as either quniform, qloguniform, qnormal or qlognormal.
<br/>
* {"_type":"uniform","_value":[low, high]}
* Which means the variable value is a value uniformly between low and high.
* When optimizing, this variable is constrained to a two-sided interval.
<br/>
* {"_type":"quniform","_value":[low, high, q]}
* Which means the variable value is a value like round(uniform(low, high) / q) * q
* Suitable for a discrete value with respect to which the objective is still somewhat "smooth", but which should be bounded both above and below.
<br/>
* {"_type":"loguniform","_value":[low, high]}
* Which means the variable value is a value drawn according to exp(uniform(low, high)) so that the logarithm of the return value is uniformly distributed.
* When optimizing, this variable is constrained to the interval [exp(low), exp(high)].
<br/>
* {"_type":"qloguniform","_value":[low, high, q]}
* Which means the variable value is a value like round(exp(uniform(low, high)) / q) * q
* Suitable for a discrete variable with respect to which the objective is "smooth" and gets smoother with the size of the value, but which should be bounded both above and below.
<br/>
* {"_type":"normal","_value":[label, mu, sigma]}
* Which means the variable value is a real value that's normally-distributed with mean mu and standard deviation sigma. When optimizing, this is an unconstrained variable.
<br/>
* {"_type":"qnormal","_value":[label, mu, sigma, q]}
* Which means the variable value is a value like round(normal(mu, sigma) / q) * q
* Suitable for a discrete variable that probably takes a value around mu, but is fundamentally unbounded.
<br/>
* {"_type":"lognormal","_value":[label, mu, sigma]}
* Which means the variable value is a value drawn according to exp(normal(mu, sigma)) so that the logarithm of the return value is normally distributed. When optimizing, this variable is constrained to be positive.
<br/>
* {"_type":"qlognormal","_value":[label, mu, sigma, q]}
* Which means the variable value is a value like round(exp(normal(mu, sigma)) / q) * q
* Suitable for a discrete variable with respect to which the objective is smooth and gets smoother with the size of the variable, which is bounded from one side.
<br/>
\ No newline at end of file
## How to contribute
TBD
\ No newline at end of file
...@@ -2,39 +2,50 @@ ...@@ -2,39 +2,50 @@
=== ===
There would be only a few changes on your existing trial(model) code to make the code runnable on NNI. We provide two approaches for you to modify your code: `Python annotation` and `NNI APIs for trial` There would be only a few changes on your existing trial(model) code to make the code runnable on NNI. We provide two approaches for you to modify your code: `Python annotation` and `NNI APIs for trial`
## Python annotation ## NNI APIs
We designed a new syntax for users to annotation which variable they want to tune and in what range they want to tune the variable. Also, they can annotate which variable they want to report as intermediate result to `assessor`, and which variable to report as the final result (e.g. model accuracy) to `tuner`. A really appealing feature of our python annotation is that it exists as comments in your code, which means you can run your code as before without NNI. Let's look at an example, below is a piece of tensorflow code: We also support NNI APIs for trial code. By using this approach, you should first prepare a search space file. An example is shown below:
``` ```
with tf.Session() as sess: {
sess.run(tf.global_variables_initializer()) "dropout_rate":{"_type":"uniform","_value":[0.1,0.5]},
batch_size = 128 "conv_size":{"_type":"choice","_value":[2,3,5,7]},
for i in range(10000): "hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
batch = mnist.train.next_batch(batch_size) "learning_rate":{"_type":"uniform","_value":[0.0001, 0.1]}
dropout_rate = 0.5 }
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], ```
mnist_network.labels: batch[1], You can refer to [here](SearchSpaceSpec.md) for the tutorial of search space.
mnist_network.keep_prob: dropout_rate})
if i % 100 == 0:
test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0})
test_acc = mnist_network.accuracy.eval( Then, include `import nni` in your trial code to use NNI APIs. Using the line:
feed_dict={mnist_network.images: mnist.test.images, ```
mnist_network.labels: mnist.test.labels, RECEIVED_PARAMS = nni.get_parameters()
mnist_network.keep_prob: 1.0})
``` ```
to get hyper-parameters' values assigned by tuner. `RECEIVED_PARAMS` is an object, for example:
```
{"conv_size": 2, "hidden_size": 124, "learning_rate": 0.0307, "dropout_rate": 0.2029}
```
On the other hand, you can use the API: `nni.report_intermediate_result(accuracy)` to send `accuracy` to assessor. And use `nni.report_final_result(accuracy)` to send `accuracy` to tuner. Here `accuracy` could be any python data type, but **NOTE that if you use built-in tuner/assessor, `accuracy` should be a numerical variable(e.g. float, int)**.
Let's say you want to tune batch\_size and dropout\_rate, and report test\_acc every 100 steps, at last report test\_acc as final result. With our python annotation, your code would look like below: The assessor will decide which trial should early stop based on the history performance of trial(intermediate result of one trial).
The tuner will generate next parameters/architecture based on the explore history(final result of all trials).
In the yaml configure file, you need two lines to enable NNI APIs:
```
useAnnotation: false
searchSpacePath: /path/to/your/search_space.json
``` ```
You can refer to [here](../examples/trials/README.md) for more information about how to write trial code using NNI APIs.
## NNI Annotation
We designed a new syntax for users to annotate the variables they want to tune and in what range they want to tune the variables. Also, they can annotate which variable they want to report as intermediate result to `assessor`, and which variable to report as the final result (e.g. model accuracy) to `tuner`. A really appealing feature of our NNI annotation is that it exists as comments in your code, which means you can run your code as before without NNI. Let's look at an example, below is a piece of tensorflow code:
```diff
with tf.Session() as sess: with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) sess.run(tf.global_variables_initializer())
"""@nni.variable(nni.choice(50, 250, 500), name=batch_size)""" + """@nni.variable(nni.choice(50, 250, 500), name=batch_size)"""
batch_size = 128 batch_size = 128
for i in range(10000): for i in range(10000):
batch = mnist.train.next_batch(batch_size) batch = mnist.train.next_batch(batch_size)
"""@nni.variable(nni.choice(1, 5), name=dropout_rate)""" + """@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
dropout_rate = 0.5 dropout_rate = 0.5
mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0], mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
mnist_network.labels: batch[1], mnist_network.labels: batch[1],
...@@ -44,47 +55,23 @@ with tf.Session() as sess: ...@@ -44,47 +55,23 @@ with tf.Session() as sess:
feed_dict={mnist_network.images: mnist.test.images, feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels, mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0}) mnist_network.keep_prob: 1.0})
"""@nni.report_intermediate_result(test_acc)""" + """@nni.report_intermediate_result(test_acc)"""
test_acc = mnist_network.accuracy.eval( test_acc = mnist_network.accuracy.eval(
feed_dict={mnist_network.images: mnist.test.images, feed_dict={mnist_network.images: mnist.test.images,
mnist_network.labels: mnist.test.labels, mnist_network.labels: mnist.test.labels,
mnist_network.keep_prob: 1.0}) mnist_network.keep_prob: 1.0})
"""@nni.report_final_result(test_acc)""" + """@nni.report_final_result(test_acc)"""
``` ```
Simply adding four lines would make your code runnable on NNI. You can still run your code independently. `@nni.variable` works on its next line assignment, and `@nni.report_intermediate_result`/`@nni.report_final_result` would send the data to assessor/tuner at that line. Please refer to [here](../tools/annotation/README.md) for more annotation syntax and more powerful usage. In the yaml configure file, you need one line to enable Python annotation: Let's say you want to tune batch\_size and dropout\_rate, and report test\_acc every 100 steps, at last report test\_acc as final result. With our NNI annotation, your code would look like below:
```
useAnnotation: true
```
## NNI APIs for trial
We also support NNI APIs for trial code. By using this approach, you should first prepare a search space file. An example is shown below:
```
{
"dropout_rate":{"_type":"uniform","_value":[0.1,0.5]},
"conv_size":{"_type":"choice","_value":[2,3,5,7]},
"hidden_size":{"_type":"choice","_value":[124, 512, 1024]},
"learning_rate":{"_type":"uniform","_value":[0.0001, 0.1]}
}
```
You can refer to [here]() for the tutorial of search space.
Then, include `import nni` in your trial code to use APIs. Using the line: Simply adding four lines would make your code runnable on NNI. You can still run your code independently. `@nni.variable` works on its next line assignment, and `@nni.report_intermediate_result`/`@nni.report_final_result` would send the data to assessor/tuner at that line. Please refer to [here](../tools/annotation/README.md) for more annotation syntax and more powerful usage. In the yaml configure file, you need one line to enable NNI annotation:
``` ```
RECEIVED_PARAMS = nni.get_parameters() useAnnotation: true
```
to get hyper-parameters' values assigned by tuner. `RECEIVED_PARAMS` is a json object, for example:
```
{'conv_size': 2, 'hidden_size': 124, 'learning_rate': 0.0307, 'dropout_rate': 0.2029}
``` ```
On the other hand, you can use the API: `nni.report_intermediate_result(accuracy)` to send `accuracy` to assessor. And use `nni.report_final_result(accuracy)` to send `accuracy` to tuner. Here `accuracy` could be any python data type, but **NOTE that if you use built-in tuner/assessor, `accuracy` should be a number (e.g. float, int)**. For users to correctly leverage NNI annotation, we briefly introduce how NNI annotation works here: NNI precompiles users' trial code to find all the annotations each of which is one line with `"""@nni` at the head of the line. Then NNI replaces each annotation with a corresponding NNI API at the location where the annotation is.
In the yaml configure file, you need two lines to enable NNI APIs: **Note that: in your trial code, you can use either one of NNI APIs and NNI annotation, but not both of them simultaneously.**
``` \ No newline at end of file
useAnnotation: false
searchSpacePath: /path/to/your/search_space.json
```
You can refer to [here](../examples/trials/README.md) for more information about how to write trial code using NNI APIs.
\ No newline at end of file
...@@ -9,17 +9,21 @@ searchSpacePath: ...@@ -9,17 +9,21 @@ searchSpacePath:
#choice: true, false #choice: true, false
useAnnotation: useAnnotation:
tuner: tuner:
tunerCommand: #choice: TPE, Random, Anneal, Evolution
tunerCwd: builtinTunerName:
tunerGpuNum: classArgs:
#choice: maximize, minimize
optimize_mode:
assessor: assessor:
assessorCommand: #choice: Medianstop
assessorCwd: builtinAssessorName:
assessorGpuNum: classArgs:
#choice: maximize, minimize
optimize_mode:
trial: trial:
trialCommand: command:
trialCodeDir: codeDir:
trialGpuNum: gpuNum:
#machineList can be empty if the platform is local #machineList can be empty if the platform is local
machineList: machineList:
- ip: - ip:
......
...@@ -8,9 +8,12 @@ trainingServicePlatform: local ...@@ -8,9 +8,12 @@ trainingServicePlatform: local
#choice: true, false #choice: true, false
useAnnotation: false useAnnotation: false
tuner: tuner:
tunerCommand: python3 __main__.py codeDir: ~/nni/examples/tuners/ga_customer_tuner
tunerCwd: /usr/share/nni/examples/tuners/ga_customer_tuner classFileName: customer_tuner.py
className: CustomerTuner
classArgs:
optimize_mode: maximize
trial: trial:
trialCommand: python3 trial.py command: python3 trial.py
trialCodeDir: /usr/share/nni/examples/trials/ga_squad codeDir: ~/nni/examples/trials/ga_squad
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
...@@ -9,10 +9,11 @@ trainingServicePlatform: local ...@@ -9,10 +9,11 @@ trainingServicePlatform: local
useAnnotation: true useAnnotation: true
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-annotation codeDir: ~/nni/examples/trials/mnist-annotation
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
...@@ -5,15 +5,16 @@ maxExecDuration: 1h ...@@ -5,15 +5,16 @@ maxExecDuration: 1h
maxTrialNum: 1 maxTrialNum: 1
#choice: local, remote #choice: local, remote
trainingServicePlatform: local trainingServicePlatform: local
searchSpacePath: /usr/share/nni/examples/trials/mnist-keras/search_space.json searchSpacePath: ~/nni/examples/trials/mnist-keras/search_space.json
#choice: true, false #choice: true, false
useAnnotation: false useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
trial: trial:
trialCommand: python3 mnist-keras.py command: python3 mnist-keras.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-keras codeDir: ~/nni/examples/trials/mnist-keras
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
...@@ -84,7 +84,7 @@ class SendMetrics(keras.callbacks.Callback): ...@@ -84,7 +84,7 @@ class SendMetrics(keras.callbacks.Callback):
Run on end of each epoch Run on end of each epoch
''' '''
LOG.debug(logs) LOG.debug(logs)
nni.report_intermediate_result(logs) nni.report_intermediate_result(logs['acc'])
def train(args, params): def train(args, params):
''' '''
......
...@@ -9,10 +9,11 @@ trainingServicePlatform: local ...@@ -9,10 +9,11 @@ trainingServicePlatform: local
useAnnotation: true useAnnotation: true
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist-smartparam codeDir: ~/nni/examples/trials/mnist-smartparam
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
...@@ -5,15 +5,16 @@ maxExecDuration: 1h ...@@ -5,15 +5,16 @@ maxExecDuration: 1h
maxTrialNum: 1 maxTrialNum: 1
#choice: local, remote #choice: local, remote
trainingServicePlatform: local trainingServicePlatform: local
searchSpacePath: /usr/share/nni/examples/trials/mnist/search_space.json searchSpacePath: ~/nni/examples/trials/mnist/search_space.json
#choice: true, false #choice: true, false
useAnnotation: false useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist codeDir: ~/nni/examples/trials/mnist
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
...@@ -5,20 +5,22 @@ maxExecDuration: 1h ...@@ -5,20 +5,22 @@ maxExecDuration: 1h
maxTrialNum: 1 maxTrialNum: 1
#choice: local, remote #choice: local, remote
trainingServicePlatform: local trainingServicePlatform: local
searchSpacePath: /usr/share/nni/examples/trials/mnist/search_space.json searchSpacePath: ~/nni/examples/trials/mnist/search_space.json
#choice: true, false #choice: true, false
useAnnotation: false useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution #choice: TPE, Random, Anneal, Evolution
tunerName: TPE builtinTunerName: TPE
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
assessor: assessor:
#choice: Medianstop #choice: Medianstop
assessorName: Medianstop builtinAssessorName: Medianstop
#choice: Maximize, Minimize classArgs:
optimizationMode: Maximize #choice: maximize, minimize
optimize_mode: maximize
trial: trial:
trialCommand: python3 mnist.py command: python3 mnist.py
trialCodeDir: /usr/share/nni/examples/trials/mnist codeDir: ~/nni/examples/trials/mnist
trialGpuNum: 0 gpuNum: 0
\ No newline at end of file \ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment