Commit 7c103cda authored by suiguoxin

Merge branch 'master' of github.com:microsoft/nni

parents 4bbffd17 7adebd6c
@@ -17,11 +17,13 @@
NNI (Neural Network Intelligence) is a toolkit to help users run automated machine learning (AutoML) experiments.
The tool dispatches and runs trial jobs generated by tuning algorithms to search the best neural architecture and/or hyper-parameters in different environments like local machine, remote servers and cloud.
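For orientation, a trial script driven by this tool looks roughly like the sketch below. It uses only the `nni.get_next_parameter`, `nni.report_intermediate_result`, and `nni.report_final_result` calls that also appear in the test trial later in this diff; the `learning_rate` key and the toy scoring loop are illustrative assumptions, not part of this commit.

```python
# Minimal trial sketch. The nni.* calls are the standard trial API; the
# "learning_rate" parameter and the fake scoring loop are illustrative only.
import nni

if __name__ == '__main__':
    params = nni.get_next_parameter()           # hyper-parameters proposed by the tuner
    lr = params.get('learning_rate', 0.01)      # assumed search-space key
    score = 0.0
    for epoch in range(10):
        score = 1.0 - 1.0 / (1.0 + lr * (epoch + 1))  # stand-in for validation accuracy
        nni.report_intermediate_result(score)   # assessor sees these per-epoch values
    nni.report_final_result(score)              # tuner sees this final value
```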
### **NNI [v0.9](https://github.com/Microsoft/nni/releases) has been released! &nbsp;<a href="#nni-released-reminder"><img width="48" src="docs/img/release_icon.png"></a>**
<p align="center">
<a href="#nni-has-been-released"><img src="docs/img/overview.svg" /></a>
</p>
+<div>
<table>
<tbody>
<tr align="center" valign="bottom">
@@ -56,28 +58,28 @@ The tool dispatches and runs trial jobs generated by tuning algorithms to search
<a href="docs/en_US/Tuner/BuiltinTuner.md">Tuner</a>
<br />
<ul>
-<b style="margin-left:-20px"><font size=4 color=#800000>General Tuner</font></b>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Random"><font size=2.9>Random Search</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Evolution"><font size=2.9>Naïve Evolution</font></a></li>
-<b><font size=4 color=#800000 style="margin-left:-20px">Tuner for HPO</font></b>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#TPE"><font size=2.9>TPE</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Anneal"><font size=2.9>Anneal</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#SMAC"><font size=2.9>SMAC</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Batch"><font size=2.9>Batch</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#GridSearch"><font size=2.9>Grid Search</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Hyperband"><font size=2.9>Hyperband</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#MetisTuner"><font size=2.9>Metis Tuner</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#BOHB"><font size=2.9>BOHB</font></a></li>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#GPTuner"><font size=2.9>GP Tuner</font></a></li>
-<b style="margin-left:-20px"><font size=4 color=#800000 style="margin-left:-20px">Tuner for NAS</font></b>
-<li><a href="docs/en_US/Tuner/BuiltinTuner.md#NetworkMorphism"><font size=2.9>Network Morphism</font></a></li>
-<li><a href="examples/tuners/enas_nni/README.md"><font size=2.9>ENAS</font></a></li>
+<b style="margin-left:-20px">General Tuner</b>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Random">Random Search</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Evolution">Naïve Evolution</a></li>
+<b style="margin-left:-20px">Tuner for HPO</b>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#TPE">TPE</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Anneal">Anneal</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#SMAC">SMAC</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Batch">Batch</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#GridSearch">Grid Search</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#Hyperband">Hyperband</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#MetisTuner">Metis Tuner</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#BOHB">BOHB</a></li>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#GPTuner">GP Tuner</a></li>
+<b style="margin-left:-20px">Tuner for NAS</b>
+<li><a href="docs/en_US/Tuner/BuiltinTuner.md#NetworkMorphism">Network Morphism</a></li>
+<li><a href="examples/tuners/enas_nni/README.md">ENAS</a></li>
</ul>
<a href="docs/en_US/Assessor/BuiltinAssessor.md">Assessor</a>
<ul>
-<li><a href="docs/en_US/Assessor/BuiltinAssessor.md#Medianstop"><font size=2.9>Median Stop</font></a></li>
-<li><a href="docs/en_US/Assessor/BuiltinAssessor.md#Curvefitting"><font size=2.9>Curve Fitting</font></a></li>
+<li><a href="docs/en_US/Assessor/BuiltinAssessor.md#Medianstop">Median Stop</a></li>
+<li><a href="docs/en_US/Assessor/BuiltinAssessor.md#Curvefitting">Curve Fitting</a></li>
</ul>
</td>
<td>
<ul>
@@ -90,53 +92,11 @@ The tool dispatches and runs trial jobs generated by tuning algorithms to search
</ul>
</ul>
</td>
</tr>
-<tr align="center" valign="bottom">
-<td style="border-top:#FF0000 solid 0px;">
-<b>References</b>
-<img src="docs/img/bar.png"/>
-</td>
-<td style="border-top:#FF0000 solid 0px;">
-<b>References</b>
-<img src="docs/img/bar.png"/>
-</td>
-<td style="border-top:#FF0000 solid 0px;">
-<b>References</b>
-<img src="docs/img/bar.png"/>
-</td>
-</tr>
-<tr valign="top">
-<td style="border-top:#FF0000 solid 0px;">
-<ul>
-<li><a href="docs/en_US/sdk_reference.rst">Python API</a></li>
-<li><a href="docs/en_US/Tutorial/AnnotationSpec.md">NNI Annotation</a></li>
-<li><a href="docs/en_US/TrialExample/Trials.md#nni-python-annotation">Annotation tutorial</a></li>
-</ul>
-</td>
-<td style="border-top:#FF0000 solid 0px;">
-<ul>
-<li><a href="docs/en_US/tuners.rst">Try different tuners</a></li>
-<li><a href="docs/en_US/assessors.rst">Try different assessors</a></li>
-<li><a href="docs/en_US/Tuner/CustomizeTuner.md">Implement a customized tuner</a></li>
-<li><a href="docs/en_US/Tuner/CustomizeAdvisor.md">Implement a customized advisor</a></li>
-<li><a href="docs/en_US/Assessor/CustomizeAssessor.md">Implement a customized assessor </a></li>
-<li><a href="docs/en_US/CommunitySharings/HpoComparision.md">HPO Comparison</a></li>
-<li><a href="docs/en_US/CommunitySharings/NasComparision.md">NAS Comparison</a></li>
-<li><a href="docs/en_US/CommunitySharings/RecommendersSvd.md">Automatically tuning SVD on NNI</a></li>
-</ul>
-</td>
-<td style="border-top:#FF0000 solid 0px;">
-<ul>
-<li><a href="docs/en_US/TrainingService/HowToImplementTrainingService.md">Implement TrainingService in NNI</a></li>
-<li><a href="docs/en_US/TrainingService/LocalMode.md">Run an experiment on local</a></li>
-<li><a href="docs/en_US/TrainingService/KubeflowMode.md">Run an experiment on Kubeflow</a></li>
-<li><a href="docs/en_US/TrainingService/PaiMode.md">Run an experiment on OpenPAI?</a></li>
-<li><a href="docs/en_US/TrainingService/RemoteMachineMode.md">Run an experiment on multiple machines?</a></li>
-</ul>
-</td>
</tbody>
</table>
+</div>
## **Who should consider using NNI**
@@ -291,17 +251,18 @@ Maybe you want to read:
* [Config an experiment](docs/en_US/Tutorial/ExperimentConfig.md)
* [How to use annotation](docs/en_US/TrialExample/Trials.md#nni-python-annotation)
## **Tutorials**
-* [Run an experiment on OpenPAI?](docs/en_US/PaiMode.md)
-* [Run an experiment on Kubeflow?](docs/en_US/KubeflowMode.md)
-* [Run an experiment on local (with multiple GPUs)?](docs/en_US/LocalMode.md)
-* [Run an experiment on multiple machines?](docs/en_US/RemoteMachineMode.md)
-* [Try different tuners](docs/en_US/tuners.rst)
-* [Try different assessors](docs/en_US/assessors.rst)
+* [Run an experiment on OpenPAI](docs/en_US/TrainingService/PaiMode.md)
+* [Run an experiment on Kubeflow](docs/en_US/TrainingService/KubeflowMode.md)
+* [Run an experiment on local (with multiple GPUs)](docs/en_US/TrainingService/LocalMode.md)
+* [Run an experiment on multiple machines](docs/en_US/TrainingService/RemoteMachineMode.md)
+* [Try different tuners](docs/en_US/Tuner/BuiltinTuner.md)
+* [Try different assessors](docs/en_US/Assessor/BuiltinAssessor.md)
* [Implement a customized tuner](docs/en_US/Tuner/CustomizeTuner.md)
-* [Implement a customized assessor](docs/en_US/CustomizeAssessor.md)
-* [Use Genetic Algorithm to find good model architectures for Reading Comprehension task](examples/trials/ga_squad/README.md)
+* [Implement a customized assessor](docs/en_US/Assessor/CustomizeAssessor.md)
+* [Use Genetic Algorithm to find good model architectures for Reading Comprehension task](docs/en_US/TrialExample/SquadEvolutionExamples.md)
## **Contribute**
This project welcomes contributions and there are many ways in which you can participate in the project, for example:
......
# ChangeLog
## Release 0.9 - 7/1/2019
### Major Features
@@ -95,18 +96,18 @@
### Major Features
-* [Version checking](https://github.com/Microsoft/nni/blob/master/docs/en_US/PaiMode.md#version-check)
+* [Version checking](TrainingService/PaiMode.md)
* check whether the version is consistent between nniManager and trialKeeper
-* [Report final metrics for early stop job](https://github.com/Microsoft/nni/issues/776)
+* [Report final metrics for early stop job](https://github.com/microsoft/nni/issues/776)
* If includeIntermediateResults is true, the last intermediate result of the trial that is early stopped by assessor is sent to tuner as final result. The default value of includeIntermediateResults is false. (A config sketch follows this list.)
-* [Separate Tuner/Assessor](https://github.com/Microsoft/nni/issues/841)
+* [Separate Tuner/Assessor](https://github.com/microsoft/nni/issues/841)
* Adds two pipes to separate message receiving channels for tuner and assessor.
* Make log collection feature configurable
* Add intermediate result graph for all trials
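To make the includeIntermediateResults entry above concrete: the flag lives in the tuner section of the experiment config, which is where the tuner schema touched later in this diff validates it. The sketch below shows that section as an equivalent Python mapping; the real config file is YAML, and the surrounding layout is an assumption.

```python
# Tuner section of an experiment config, expressed as the equivalent Python
# mapping (the real config is YAML). Field names follow the tuner schema in
# this commit; treat the overall layout as an assumption.
tuner_config = {
    "builtinTunerName": "TPE",
    "classArgs": {"optimize_mode": "maximize"},
    # When True, a trial stopped early by the assessor reports its last
    # intermediate result to the tuner as the final result. Defaults to False.
    "includeIntermediateResults": True,
}
```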
### Bug fix
-* [Add shmMB config key for OpenPAI](https://github.com/Microsoft/nni/issues/842)
+* [Add shmMB config key for OpenPAI](https://github.com/microsoft/nni/issues/842)
* Fix the bug that doesn't show any result if metrics is dict
* Fix the number calculation issue for float types in hyperband
* Fix a bug in the search space conversion in SMAC tuner
@@ -121,8 +122,8 @@
### Documentation
* Chinese version document: https://nni.readthedocs.io/zh/latest/
-* Debuggability/serviceability document: https://nni.readthedocs.io/en/latest/HowToDebug.html
-* Tuner assessor reference: https://nni.readthedocs.io/en/latest/sdk_reference.html#tuner
+* Debuggability/serviceability document: https://nni.readthedocs.io/en/latest/Tutorial/HowToDebug.html
+* Tuner assessor reference: https://nni.readthedocs.io/en/latest/sdk_reference.html
### Bug Fixes and Other Changes
* Fix a race condition bug that does not store trial job cancel status correctly.
@@ -134,8 +135,8 @@
## Release 0.5.1 - 1/31/2018
### Improvements
-* Making [log directory](https://github.com/Microsoft/nni/blob/v0.5.1/docs/en_US/ExperimentConfig.md) configurable
-* Support [different levels of logs](https://github.com/Microsoft/nni/blob/v0.5.1/docs/en_US/ExperimentConfig.md), making it easier for debugging
+* Making [log directory](https://github.com/microsoft/nni/blob/v0.5.1/docs/ExperimentConfig.md) configurable
+* Support [different levels of logs](https://github.com/microsoft/nni/blob/v0.5.1/docs/ExperimentConfig.md), making it easier for debugging
### Documentation
* Reorganized documentation & New Homepage Released: https://nni.readthedocs.io/en/latest/
@@ -200,8 +201,8 @@
### New examples
-* [FashionMnist](https://github.com/Microsoft/nni/tree/master/examples/trials/network_morphism), work together with network morphism tuner
-* [Distributed MNIST example](https://github.com/Microsoft/nni/tree/master/examples/trials/mnist-distributed-pytorch) written in PyTorch
+* [FashionMnist](https://github.com/microsoft/nni/tree/master/examples/trials/network_morphism), work together with network morphism tuner
+* [Distributed MNIST example](https://github.com/microsoft/nni/tree/master/examples/trials/mnist-distributed-pytorch) written in PyTorch
## Release 0.4 - 12/6/2018
@@ -209,7 +210,7 @@
* [Kubeflow Training service](TrainingService/KubeflowMode.md)
* Support tf-operator
-* [Distributed trial example](https://github.com/Microsoft/nni/tree/master/examples/trials/mnist-distributed/dist_mnist.py) on Kubeflow
+* [Distributed trial example](https://github.com/microsoft/nni/tree/master/examples/trials/mnist-distributed/dist_mnist.py) on Kubeflow
* [Grid search tuner](Tuner/GridsearchTuner.md)
* [Hyperband tuner](Tuner/HyperbandAdvisor.md)
* Support launch NNI experiment on MAC
@@ -256,7 +257,7 @@
Each trial job is allocated a unique sequence number, which can be retrieved by nni.get_sequence_id() API.
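A minimal sketch of how a trial might use this API; only `nni.get_sequence_id()` itself is taken from the release note above, while the per-trial output-directory convention is an illustrative assumption.

```python
# Sketch: use the trial's sequence number to keep per-trial outputs apart.
import os
import nni

seq_id = nni.get_sequence_id()                      # unique sequence number of this trial job
output_dir = os.path.join('outputs', 'trial_%d' % seq_id)  # assumed naming convention
os.makedirs(output_dir, exist_ok=True)
print('trial sequence id:', seq_id, '-> writing to', output_dir)
```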
```bash
-git clone -b v0.3 https://github.com/Microsoft/nni.git
+git clone -b v0.3 https://github.com/microsoft/nni.git
```
* **nni.report_final_result(result)** API supports more data types for result parameter.
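As a hedged illustration of the "more data types" note above: besides a plain number, a dict result can be reported; the convention that a `default` key carries the value the tuner optimizes is an assumption here, not something this changelog states.

```python
# Two ways a trial might report its final result; a real trial makes only one
# of these calls. The dict form with a 'default' key is an assumption about
# the richer data types mentioned above.
import nni

accuracy = 0.93

nni.report_final_result(accuracy)                                 # plain number
# nni.report_final_result({'default': accuracy, 'loss': 0.21})    # dict form (assumed)
```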
@@ -278,20 +279,19 @@
docker pull msranni/nni:latest
```
-* New trial example: [NNI Sklearn Example](https://github.com/Microsoft/nni/tree/master/examples/trials/sklearn)
-* New competition example: [Kaggle Competition TGS Salt Example](https://github.com/Microsoft/nni/tree/master/examples/trials/kaggle-tgs-salt)
+* New trial example: [NNI Sklearn Example](https://github.com/microsoft/nni/tree/master/examples/trials/sklearn)
+* New competition example: [Kaggle Competition TGS Salt Example](https://github.com/microsoft/nni/tree/master/examples/trials/kaggle-tgs-salt)
### Others
* UI refactoring, refer to [WebUI doc](Tutorial/WebUI.md) for how to work with the new UI.
* Continuous Integration: NNI had switched to Azure pipelines
-* [Known Issues in release 0.3.0](https://github.com/Microsoft/nni/labels/nni030knownissues).
## Release 0.2.0 - 9/29/2018
### Major Features
-* Support [OpenPAI](https://github.com/Microsoft/pai) Training Platform (See [here](TrainingService/PaiMode.md) for instructions about how to submit NNI job in pai mode)
+* Support [OpenPAI](https://github.com/microsoft/pai) Training Platform (See [here](TrainingService/PaiMode.md) for instructions about how to submit NNI job in pai mode)
* Support training services on pai mode. NNI trials will be scheduled to run on OpenPAI cluster
* NNI trial's output (including logs and model file) will be copied to OpenPAI HDFS for further debugging and checking
* Support [SMAC](https://www.cs.ubc.ca/~hutter/papers/10-TR-SMAC.pdf) tuner (See [here](Tuner/SmacTuner.md) for instructions about how to use SMAC tuner)
@@ -301,9 +301,6 @@
* Update ga squad example and related documentation
* WebUI UX small enhancement and bug fix
-### Known Issues
-[Known Issues in release 0.2.0](https://github.com/Microsoft/nni/labels/nni020knownissues).
## Release 0.1.0 - 9/10/2018 (initial release)
@@ -327,6 +324,3 @@ Initial release of Neural Network Intelligence (NNI).
* Others
* Support simple GPU job scheduling
-### Known Issues
-[Known Issues in release 0.1.0](https://github.com/Microsoft/nni/labels/nni010knownissues).
configspace @ f389e1d0
Subproject commit f389e1d0a72564f7f1fd4d86039e5f393a45a058
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
@@ -141,6 +141,7 @@ class NNIDataStore implements DataStore {
public async getTrialJob(trialJobId: string): Promise<TrialJobInfo> {
const trialJobs: TrialJobInfo[] = await this.queryTrialJobs(undefined, trialJobId);
+assert(trialJobs.length <= 1);
return trialJobs[0];
}
......
@@ -242,10 +242,8 @@ class NNIManager implements Manager {
});
}
-public getTrialJob(trialJobId: string): Promise<TrialJobDetail> {
-return Promise.resolve(
-this.trainingService.getTrialJob(trialJobId)
-);
+public getTrialJob(trialJobId: string): Promise<TrialJobInfo> {
+return this.dataStore.getTrialJob(trialJobId);
}
public async setClusterMetadata(key: string, value: string): Promise<void> {
......
@@ -221,7 +221,12 @@ class MockedDataStore implements DataStore {
}
public getTrialJob(trialJobId: string): Promise<TrialJobInfo> {
-throw new Error("Method not implemented.");
+return Promise.resolve({
+id: '1234',
+status: 'SUCCEEDED',
+startTime: Date.now(),
+endTime: Date.now()
+});
}
private async getFinalMetricData(trialJobId: string): Promise<any> {
......
@@ -56,7 +56,11 @@
},
"resolutions": {
"mem": "^4.0.0",
-"handlebars": "^4.1.0"
+"handlebars": "^4.1.0",
+"lodash": "^4.17.13",
+"lodash.merge": "^4.6.2",
+"node.extend": "^1.1.7",
+"hoek": "^4.2.1"
},
"engines": {
"node": ">=10.0.0"
......
@@ -1007,6 +1007,10 @@ fs.realpath@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+function-bind@^1.1.1:
+version "1.1.1"
+resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
gauge@~2.7.3:
version "2.7.4"
resolved "https://registry.yarnpkg.com/gauge/-/gauge-2.7.4.tgz#2c03405c7538c39d7eb37b317022e325fb018bf7"
@@ -1157,6 +1161,12 @@ has-unicode@^2.0.0:
version "2.0.1"
resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+has@^1.0.3:
+version "1.0.3"
+resolved "https://registry.yarnpkg.com/has/-/has-1.0.3.tgz#722d7cbfc1f6aa8241f16dd814e011e1f41e8796"
+dependencies:
+function-bind "^1.1.1"
hash-base@^3.0.0:
version "3.0.4"
resolved "https://registry.yarnpkg.com/hash-base/-/hash-base-3.0.4.tgz#5fc8686847ecd73499403319a6b0a3f3f6ae4918"
@@ -1168,9 +1178,9 @@ he@1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/he/-/he-1.1.1.tgz#93410fd21b009735151f8868c2f271f3427e23fd"
-hoek@2.x.x:
-version "2.16.3"
-resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+hoek@2.x.x, hoek@^4.2.1:
+version "4.2.1"
+resolved "https://registry.yarnpkg.com/hoek/-/hoek-4.2.1.tgz#9634502aa12c445dd5a7c5734b572bb8738aacbb"
hosted-git-info@^2.1.4:
version "2.7.1"
@@ -1310,9 +1320,9 @@ is-typedarray@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
-is@~0.2.6:
-version "0.2.7"
-resolved "http://registry.npmjs.org/is/-/is-0.2.7.tgz#3b34a2c48f359972f35042849193ae7264b63562"
+is@^3.2.1:
+version "3.3.0"
+resolved "https://registry.yarnpkg.com/is/-/is-3.3.0.tgz#61cff6dd3c4193db94a3d62582072b44e5645d79"
isarray@~1.0.0:
version "1.0.0"
@@ -1532,9 +1542,9 @@ lodash.intersection@^4.4.0:
version "4.4.0"
resolved "https://registry.yarnpkg.com/lodash.intersection/-/lodash.intersection-4.4.0.tgz#0a11ba631d0e95c23c7f2f4cbb9a692ed178e705"
-lodash.merge@^4.6.1:
-version "4.6.1"
-resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.1.tgz#adc25d9cb99b9391c59624f379fbba60d7111d54"
+lodash.merge@^4.6.1, lodash.merge@^4.6.2:
+version "4.6.2"
+resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a"
lodash.omit@^4.5.0:
version "4.5.0"
@@ -1552,9 +1562,9 @@ lodash.uniq@^4.5.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/lodash.uniq/-/lodash.uniq-4.5.0.tgz#d0225373aeb652adc1bc82e4945339a842754773"
-lodash@^4.17.10, lodash@^4.17.11:
-version "4.17.11"
-resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.11.tgz#b39ea6229ef607ecd89e2c8df12536891cac9b8d"
+lodash@^4.17.10, lodash@^4.17.11, lodash@^4.17.13:
+version "4.17.14"
+resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.14.tgz#9ce487ae66c96254fe20b599f21b6816028078ba"
long@^4.0.0:
version "4.0.0"
@@ -1801,12 +1811,12 @@ node-version@^1.0.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/node-version/-/node-version-1.2.0.tgz#34fde3ffa8e1149bd323983479dda620e1b5060d"
-node.extend@1.0.8:
-version "1.0.8"
-resolved "https://registry.yarnpkg.com/node.extend/-/node.extend-1.0.8.tgz#bab04379f7383f4587990c9df07b6a7f65db772b"
+node.extend@1.0.8, node.extend@^1.1.7:
+version "1.1.8"
+resolved "https://registry.yarnpkg.com/node.extend/-/node.extend-1.1.8.tgz#0aab3e63789f4e6d68b42bc00073ad1881243cf0"
dependencies:
-is "~0.2.6"
-object-keys "~0.4.0"
+has "^1.0.3"
+is "^3.2.1"
node.flow@1.2.3:
version "1.2.3"
@@ -1910,10 +1920,6 @@ object-assign@^4.0.1, object-assign@^4.1.0:
version "4.1.1"
resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863"
-object-keys@~0.4.0:
-version "0.4.0"
-resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-0.4.0.tgz#28a6aae7428dd2c3a92f3d95f21335dd204e0336"
oidc-token-hash@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/oidc-token-hash/-/oidc-token-hash-3.0.1.tgz#f9e2496a3eea5f755671be54a97f57170a74081d"
......
ConfigSpace==0.4.7
-statsmodels==0.9.0
+statsmodels==0.10.0
\ No newline at end of file \ No newline at end of file
@@ -4,7 +4,7 @@
"private": true,
"dependencies": {
"antd": "^3.8.1",
-"axios": "^0.18.0",
+"axios": "^0.18.1",
"babel-polyfill": "^6.26.0",
"copy-to-clipboard": "^3.0.8",
"echarts": "^4.1.0",
@@ -35,6 +35,13 @@
},
"resolutions": {
"mem": "^4.0.0",
-"handlebars": "^4.1.0"
+"handlebars": "^4.1.0",
+"lodash": "^4.17.13",
+"lodash.template": "^4.5.0",
+"js-yaml": "^3.13.1",
+"webpack-dev-server": "^3.1.11",
+"merge": "^1.2.1",
+"cryptiles": "^4.1.2",
+"hoek": "^4.2.1"
}
}
\ No newline at end of file
(One file's diff is collapsed and not shown.)
@@ -131,3 +131,5 @@ if __name__ == '__main__':
setup_experiment(args.preinstall)
run(args)
+#
\ No newline at end of file
@@ -2,9 +2,14 @@ import time
import nni
if __name__ == '__main__':
-hyper_params = nni.get_next_parameter()
+nni.get_next_parameter()
for i in range(10):
+if i % 2 == 0:
+print('report intermediate result without end of line.', end='')
+else:
+print('report intermediate result.')
nni.report_intermediate_result(0.1*(i+1))
time.sleep(2)
+print('test final metrics not at line start.', end='')
nni.report_final_result(1.0)
+print('done')
@@ -76,4 +76,5 @@ jobs:
--nni_docker_image $TEST_IMG --data_dir $(data_dir) --output_dir $(output_dir) --nni_manager_ip $(nni_manager_ip)
PATH=$HOME/.local/bin:$PATH python3 config_test.py --ts pai --exclude multi_phase_batch,multi_phase_grid
+PATH=$HOME/.local/bin:$PATH python3 metrics_test.py
displayName: 'integration test'
@@ -53,6 +53,7 @@ jobs:
--remote_port $(cat port) --remote_pwd $(docker_pwd) --nni_manager_ip $(nni_manager_ip)
cat training_service.yml
PATH=$HOME/.local/bin:$PATH python3 config_test.py --ts remote --exclude cifar10,multi_phase_batch,multi_phase_grid
+PATH=$HOME/.local/bin:$PATH python3 metrics_test.py
displayName: 'integration test'
- task: SSH@0
inputs:
......
@@ -70,14 +70,22 @@ common_schema = {
}
}
tuner_schema_dict = {
-('TPE', 'Anneal', 'SMAC', 'Evolution'): {
-'builtinTunerName': setChoice('builtinTunerName', 'TPE', 'Anneal', 'SMAC', 'Evolution'),
+('TPE', 'Anneal', 'SMAC'): {
+'builtinTunerName': setChoice('builtinTunerName', 'TPE', 'Anneal', 'SMAC'),
Optional('classArgs'): {
'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
},
Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
},
+('Evolution'): {
+'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
+Optional('classArgs'): {
+'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
+Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
+},
+Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
+},
('BatchTuner', 'GridSearch', 'Random'): {
'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
......
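With the schema change above, the Evolution tuner gets its own entry and an optional population_size class argument. A config's tuner section matching that schema would look roughly like the sketch below (a Python mapping standing in for the YAML config; the concrete values are illustrative).

```python
# Tuner section matching the new Evolution entry in tuner_schema_dict above;
# the YAML experiment config is shown here as the equivalent Python mapping.
evolution_tuner = {
    'builtinTunerName': 'Evolution',
    'classArgs': {
        'optimize_mode': 'maximize',
        'population_size': 100,   # optional; the schema validates an int in [0, 99999]
    },
    'gpuNum': 0,                  # optional
}
```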
@@ -151,7 +151,7 @@ def parse_ids(args):
exit(1)
else:
result_list = running_experiment_list
-elif args.all:
+elif args.id == 'all':
result_list = running_experiment_list
elif args.id.endswith('*'):
for id in running_experiment_list:
@@ -166,7 +166,7 @@ def parse_ids(args):
if len(result_list) > 1:
print_error(args.id + ' is ambiguous, please choose ' + ' '.join(result_list) )
return None
-if not result_list and args.id:
+if not result_list and args.id and args.id != 'all':
print_error('There are no experiments matched, please set correct experiment id...')
elif not result_list:
print_error('There is no experiment running...')
......
@@ -134,7 +134,7 @@ class PipeLogReader(threading.Thread):
self._is_read_completed = False
self.process_exit = False
self.log_collection = log_collection
-self.log_pattern = re.compile(r'^NNISDK_MEb\'.*\'$')
+self.log_pattern = re.compile(r'NNISDK_MEb\'.*\'$')
def _populateQueue(stream, queue):
'''
@@ -172,11 +172,14 @@ class PipeLogReader(threading.Thread):
for line in iter(self.pipeReader.readline, ''):
self.orig_stdout.write(line.rstrip() + '\n')
self.orig_stdout.flush()
if self.log_collection == 'none':
-# If not match metrics, do not put the line into queue
-if not self.log_pattern.match(line):
-continue
-self.queue.put(line)
+search_result = self.log_pattern.search(line)
+if search_result:
+metrics = search_result.group(0)
+self.queue.put(metrics+'\n')
+else:
+self.queue.put(line)
self.pipeReader.close()
......
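The PipeLogReader change above drops the `^` anchor and switches from `match` to `search` so that a metrics record is still collected when earlier stdout output on the same line was printed without a trailing newline, which is exactly what the new test trial in this diff exercises. A short standalone sketch of the difference, with an illustrative payload:

```python
# Why the anchor was dropped and match() became search(): a metric emitted
# after a print(..., end='') shares its line with ordinary output. The sample
# line and the '0.5' payload are illustrative only.
import re

line = "report intermediate result without end of line.NNISDK_MEb'0.5'"

old_pattern = re.compile(r'^NNISDK_MEb\'.*\'$')   # pre-change pattern
new_pattern = re.compile(r'NNISDK_MEb\'.*\'$')    # pattern after this commit

print(old_pattern.match(line))                    # None: leading text breaks the anchored match
print(new_pattern.search(line).group(0))          # NNISDK_MEb'0.5'
```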