Unverified Commit ccb2211e authored by chicm-ms's avatar chicm-ms Committed by GitHub
Browse files

Merge pull request #17 from microsoft/master

pull code
parents 58fd0c84 31dc58e9
{
"layer0":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer1":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer2":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer3":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer4":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer5":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer6":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer7":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer8":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]},
"layer9":{"_type":"choice","_value":[
"Empty",
["Conv", {"_type":"choice","_value":[2,3,5]}],
["Max_pool", {"_type":"choice","_value":[2,3,5]}],
["Avg_pool", {"_type":"choice","_value":[2,3,5]}]
]}
"layer0": {
"_type": "choice",
"_value": [{
"_name": "Empty"
},
{
"_name": "Conv",
"kernel_size": {
"_type": "choice",
"_value": [1, 2, 3, 5]
}
},
{
"_name": "Max_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
},
{
"_name": "Avg_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
}
]
},
"layer1": {
"_type": "choice",
"_value": [{
"_name": "Empty"
},
{
"_name": "Conv",
"kernel_size": {
"_type": "choice",
"_value": [1, 2, 3, 5]
}
},
{
"_name": "Max_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
},
{
"_name": "Avg_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
}
]
},
"layer2": {
"_type": "choice",
"_value": [{
"_name": "Empty"
},
{
"_name": "Conv",
"kernel_size": {
"_type": "choice",
"_value": [1, 2, 3, 5]
}
},
{
"_name": "Max_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
},
{
"_name": "Avg_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
}
]
},
"layer3": {
"_type": "choice",
"_value": [{
"_name": "Empty"
},
{
"_name": "Conv",
"kernel_size": {
"_type": "choice",
"_value": [1, 2, 3, 5]
}
},
{
"_name": "Max_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
},
{
"_name": "Avg_pool",
"pooling_size": {
"_type": "choice",
"_value": [2, 3, 5]
}
}
]
}
}
\ No newline at end of file
......@@ -35,6 +35,23 @@ export class NNIError extends Error {
}
this.cause = err;
}
public static FromError(err: NNIError | Error | string, messagePrefix?: string): NNIError {
const msgPrefix: string = messagePrefix === undefined ? '' : messagePrefix;
if (err instanceof NNIError) {
if (err.message !== undefined) {
err.message = msgPrefix + err.message;
}
return err;
} else if (typeof(err) === 'string') {
return new NNIError('', msgPrefix + err);
} else if (err instanceof Error) {
return new NNIError('', msgPrefix + err.message, err);
} else {
throw new Error(`Wrong instance type: ${typeof(err)}`);
}
}
}
export class MethodNotImplementedError extends Error {
......
......@@ -106,7 +106,7 @@ class IpcInterface {
this.logger.warning('Commands jammed in buffer!');
}
} catch (err) {
throw new NNIError('Dispatcher Error', `Dispatcher Error: ${err.message}`, err);
throw NNIError.FromError(err, 'Dispatcher Error: ');
}
}
......
......@@ -77,7 +77,7 @@ class NNIDataStore implements DataStore {
try {
await this.db.storeExperimentProfile(experimentProfile);
} catch (err) {
throw new NNIError('Datastore error', `Datastore error: ${err.message}`, err);
throw NNIError.FromError(err, 'Datastore error: ');
}
}
......@@ -105,7 +105,7 @@ class NNIDataStore implements DataStore {
return this.db.storeTrialJobEvent(event, trialJobId, timestamp, hyperParameter, jobDetail).catch(
(err: Error) => {
throw new NNIError('Datastore error', `Datastore error: ${err.message}`, err);
throw NNIError.FromError(err, 'Datastore error: ');
}
);
}
......@@ -163,7 +163,7 @@ class NNIDataStore implements DataStore {
timestamp: Date.now()
}));
} catch (err) {
throw new NNIError('Datastore error', `Datastore error: ${err.message}`, err);
throw NNIError.FromError(err, 'Datastore error');
}
}
......
......@@ -372,7 +372,7 @@ class NNIManager implements Manager {
private async periodicallyUpdateExecDuration(): Promise<void> {
let count: number = 1;
while (this.status.status !== 'STOPPING' && this.status.status !== 'STOPPED') {
while (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
await delay(1000 * 1); // 1 seconds
if (this.status.status === 'RUNNING') {
this.experimentProfile.execDuration += 1;
......@@ -461,7 +461,7 @@ class NNIManager implements Manager {
}
let allFinishedTrialJobNum: number = this.currSubmittedTrialNum;
let waitSubmittedToFinish: number;
while (this.status.status !== 'STOPPING' && this.status.status !== 'STOPPED') {
while (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
const finishedTrialJobNum: number = await this.requestTrialJobsStatus();
allFinishedTrialJobNum += finishedTrialJobNum;
......@@ -573,13 +573,13 @@ class NNIManager implements Manager {
await Promise.all([
this.periodicallyUpdateExecDuration(),
this.pingDispatcher().catch((err: Error) => {
throw new NNIError('Dispatcher error', `Dispatcher error: ${err.message}`, err);
throw NNIError.FromError(err, 'Dispatcher error: ');
}),
this.trainingService.run().catch((err: Error) => {
throw new NNIError('Training service error', `Training service error: ${err.message}`, err);
throw NNIError.FromError(err, 'Training service error: ');
}),
this.manageTrials().catch((err: Error) => {
throw new NNIError('Job management error', `Job management error: ${err.message}`, err);
throw NNIError.FromError(err, 'Job management error: ');
})]);
}
......@@ -591,13 +591,13 @@ class NNIManager implements Manager {
}
this.trainingService.addTrialJobMetricListener((metric: TrialJobMetric) => {
this.onTrialJobMetrics(metric).catch((err: Error) => {
this.criticalError(new NNIError('Job metrics error', `Job metrics error: ${err.message}`, err));
this.criticalError(NNIError.FromError(err, 'Job metrics error: '));
});
});
this.dispatcher.onCommand((commandType: string, content: string) => {
this.onTunerCommand(commandType, content).catch((err: Error) => {
this.criticalError(new NNIError('Tuner command event error', `Tuner command event error: ${err.message}`, err));
this.criticalError(NNIError.FromError(err, 'Tuner command event error: '));
});
});
}
......@@ -671,7 +671,9 @@ class NNIManager implements Manager {
'ADD_HYPERPARAMETER', tunerCommand.trial_job_id, content, undefined);
break;
case NO_MORE_TRIAL_JOBS:
this.setStatus('TUNER_NO_MORE_TRIAL');
if (!['ERROR', 'STOPPING', 'STOPPED'].includes(this.status.status)) {
this.setStatus('TUNER_NO_MORE_TRIAL');
}
break;
case KILL_TRIAL_JOB:
this.log.info(`cancelTrialJob: ${JSON.parse(content)}`);
......
......@@ -73,8 +73,13 @@ class GPUScheduler {
public getAvailableGPUIndices(): number[] {
if (this.gpuSummary !== undefined) {
return this.gpuSummary.gpuInfos.filter((info: GPUInfo) => info.activeProcessNum === 0)
.map((info: GPUInfo) => info.index);
if(process.platform === 'win32') {
return this.gpuSummary.gpuInfos.map((info: GPUInfo) => info.index);
}
else{
return this.gpuSummary.gpuInfos.filter((info: GPUInfo) => info.activeProcessNum === 0)
.map((info: GPUInfo) => info.index);
}
}
return [];
......@@ -100,12 +105,16 @@ class GPUScheduler {
}
private async updateGPUSummary(): Promise<void> {
const cmdresult: cpp.childProcessPromise.Result =
await execTail(path.join(this.gpuMetricCollectorScriptFolder, 'gpu_metrics'));
if (cmdresult && cmdresult.stdout) {
this.gpuSummary = <GPUSummary>JSON.parse(cmdresult.stdout);
} else {
this.log.error('Could not get gpu metrics information!');
let gpuMetricPath = path.join(this.gpuMetricCollectorScriptFolder, 'gpu_metrics');
if (fs.existsSync(gpuMetricPath)) {
const cmdresult: cpp.childProcessPromise.Result = await execTail(gpuMetricPath);
if (cmdresult && cmdresult.stdout) {
this.gpuSummary = <GPUSummary>JSON.parse(cmdresult.stdout);
} else {
this.log.error('Could not get gpu metrics information!');
}
} else{
this.log.warning('gpu_metrics file does not exist!')
}
}
}
......
......@@ -21,7 +21,6 @@
bohb_advisor.py
'''
from enum import Enum, unique
import sys
import math
import logging
......@@ -32,7 +31,7 @@ import ConfigSpace.hyperparameters as CSH
from nni.protocol import CommandType, send
from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.utils import extract_scalar_reward
from nni.utils import OptimizeMode, extract_scalar_reward
from .config_generator import CG_BOHB
......@@ -42,12 +41,6 @@ _next_parameter_id = 0
_KEY = 'TRIAL_BUDGET'
_epsilon = 1e-6
@unique
class OptimizeMode(Enum):
"""Optimize Mode class"""
Minimize = 'minimize'
Maximize = 'maximize'
def create_parameter_id():
"""Create an id
......
......@@ -18,61 +18,34 @@
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
evolution_tuner.py including:
class OptimizeMode
class Individual
class EvolutionTuner
evolution_tuner.py
"""
import copy
from enum import Enum, unique
import random
import numpy as np
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from .. import parameter_expressions
@unique
class OptimizeMode(Enum):
"""Optimize Mode class
from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, split_index
if OptimizeMode is 'minimize', it means the tuner need to minimize the reward
that received from Trial.
import nni.parameter_expressions as parameter_expressions
if OptimizeMode is 'maximize', it means the tuner need to maximize the reward
that received from Trial.
"""
Minimize = 'minimize'
Maximize = 'maximize'
@unique
class NodeType(Enum):
"""Node Type class
"""
Root = 'root'
Type = '_type'
Value = '_value'
Index = '_index'
def json2space(x, oldy=None, name=NodeType.Root.value):
def json2space(x, oldy=None, name=NodeType.ROOT):
"""Change search space from json format to hyperopt format
"""
y = list()
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
if NodeType.TYPE in x.keys():
_type = x[NodeType.TYPE]
name = name + '-' + _type
if _type == 'choice':
if oldy != None:
_index = oldy[NodeType.Index.value]
y += json2space(x[NodeType.Value.value][_index],
oldy[NodeType.Value.value], name=name+'[%d]' % _index)
_index = oldy[NodeType.INDEX]
y += json2space(x[NodeType.VALUE][_index],
oldy[NodeType.VALUE], name=name+'[%d]' % _index)
else:
y += json2space(x[NodeType.Value.value], None, name=name)
y += json2space(x[NodeType.VALUE], None, name=name)
y.append(name)
else:
for key in x.keys():
......@@ -80,28 +53,28 @@ def json2space(x, oldy=None, name=NodeType.Root.value):
None else None), name+"[%s]" % str(key))
elif isinstance(x, list):
for i, x_i in enumerate(x):
if isinstance(x_i, dict):
if NodeType.NAME not in x_i.keys():
raise RuntimeError('\'_name\' key is not found in this nested search space.')
y += json2space(x_i, (oldy[i] if oldy !=
None else None), name+"[%d]" % i)
else:
pass
return y
def json2paramater(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.Root.value):
def json2parameter(x, is_rand, random_state, oldy=None, Rand=False, name=NodeType.ROOT):
"""Json to pramaters.
"""
if isinstance(x, dict):
if NodeType.Type.value in x.keys():
_type = x[NodeType.Type.value]
_value = x[NodeType.Value.value]
if NodeType.TYPE in x.keys():
_type = x[NodeType.TYPE]
_value = x[NodeType.VALUE]
name = name + '-' + _type
Rand |= is_rand[name]
if Rand is True:
if _type == 'choice':
_index = random_state.randint(len(_value))
y = {
NodeType.Index.value: _index,
NodeType.Value.value: json2paramater(x[NodeType.Value.value][_index],
NodeType.INDEX: _index,
NodeType.VALUE: json2parameter(x[NodeType.VALUE][_index],
is_rand,
random_state,
None,
......@@ -116,39 +89,20 @@ def json2paramater(x, is_rand, random_state, oldy=None, Rand=False, name=NodeTyp
else:
y = dict()
for key in x.keys():
y[key] = json2paramater(x[key], is_rand, random_state, oldy[key]
y[key] = json2parameter(x[key], is_rand, random_state, oldy[key]
if oldy != None else None, Rand, name + "[%s]" % str(key))
elif isinstance(x, list):
y = list()
for i, x_i in enumerate(x):
y.append(json2paramater(x_i, is_rand, random_state, oldy[i]
if isinstance(x_i, dict):
if NodeType.NAME not in x_i.keys():
raise RuntimeError('\'_name\' key is not found in this nested search space.')
y.append(json2parameter(x_i, is_rand, random_state, oldy[i]
if oldy != None else None, Rand, name + "[%d]" % i))
else:
y = copy.deepcopy(x)
return y
def _split_index(params):
"""Delete index information from params
Parameters
----------
params : dict
Returns
-------
result : dict
"""
result = {}
for key in params:
if isinstance(params[key], dict):
value = params[key]['_value']
else:
value = params[key]
result[key] = value
return result
class Individual(object):
"""
Indicidual class to store the indv info.
......@@ -229,7 +183,7 @@ class EvolutionTuner(Tuner):
for item in self.space:
is_rand[item] = True
for _ in range(self.population_size):
config = json2paramater(
config = json2parameter(
self.searchspace_json, is_rand, self.random_state)
self.population.append(Individual(config=config))
......@@ -267,14 +221,14 @@ class EvolutionTuner(Tuner):
mutation_pos = space[random.randint(0, len(space)-1)]
for i in range(len(self.space)):
is_rand[self.space[i]] = (self.space[i] == mutation_pos)
config = json2paramater(
config = json2parameter(
self.searchspace_json, is_rand, self.random_state, self.population[0].config)
self.population.pop(1)
# remove "_index" from config and save params-id
total_config = config
self.total_data[parameter_id] = total_config
config = _split_index(total_config)
config = split_index(total_config)
return config
def receive_trial_result(self, parameter_id, parameters, value):
......
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
test_evolution_tuner.py
"""
import numpy as np
from unittest import TestCase, main
from nni.evolution_tuner.evolution_tuner import json2space, json2parameter
class EvolutionTunerTestCase(TestCase):
def test_json2space(self):
"""test for json2space
"""
json_search_space = {
"optimizer": {
"_type": "choice",
"_value": ["Adam", "SGD"]
},
"learning_rate": {
"_type": "choice",
"_value": [0.0001, 0.001, 0.002, 0.005, 0.01]
}
}
search_space_instance = json2space(json_search_space)
self.assertIn('root[optimizer]-choice', search_space_instance)
self.assertIn('root[learning_rate]-choice', search_space_instance)
def test_json2parameter(self):
"""test for json2parameter
"""
json_search_space = {
"optimizer":{
"_type":"choice","_value":["Adam", "SGD"]
},
"learning_rate":{
"_type":"choice",
"_value":[0.0001, 0.001, 0.002, 0.005, 0.01]
}
}
space = json2space(json_search_space)
random_state = np.random.RandomState()
is_rand = dict()
for item in space:
is_rand[item] = True
search_space_instance = json2parameter(json_search_space, is_rand, random_state)
self.assertIn(search_space_instance["optimizer"]["_index"], range(2))
self.assertIn(search_space_instance["optimizer"]["_value"], ["Adam", "SGD"])
self.assertIn(search_space_instance["learning_rate"]["_index"], range(5))
self.assertIn(search_space_instance["learning_rate"]["_value"], [0.0001, 0.001, 0.002, 0.005, 0.01])
if __name__ == '__main__':
main()
......@@ -56,7 +56,7 @@ class GridSearchTuner(Tuner):
self.expanded_search_space = []
self.supplement_data = dict()
def json2paramater(self, ss_spec):
def json2parameter(self, ss_spec):
'''
generate all possible configs for hyperparameters from hyperparameter space.
ss_spec: hyperparameter space
......@@ -68,7 +68,7 @@ class GridSearchTuner(Tuner):
chosen_params = list()
if _type == 'choice':
for value in _value:
choice = self.json2paramater(value)
choice = self.json2parameter(value)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
......@@ -78,12 +78,12 @@ class GridSearchTuner(Tuner):
else:
chosen_params = dict()
for key in ss_spec.keys():
chosen_params[key] = self.json2paramater(ss_spec[key])
chosen_params[key] = self.json2parameter(ss_spec[key])
return self.expand_parameters(chosen_params)
elif isinstance(ss_spec, list):
chosen_params = list()
for subspec in ss_spec[1:]:
choice = self.json2paramater(subspec)
choice = self.json2parameter(subspec)
if isinstance(choice, list):
chosen_params.extend(choice)
else:
......@@ -135,7 +135,7 @@ class GridSearchTuner(Tuner):
'''
Check if the search space is valid and expand it: only contains 'choice' type or other types beginnning with the letter 'q'
'''
self.expanded_search_space = self.json2paramater(search_space)
self.expanded_search_space = self.json2parameter(search_space)
def generate_parameters(self, parameter_id):
self.count += 1
......
......@@ -21,7 +21,6 @@
hyperband_advisor.py
"""
from enum import Enum, unique
import sys
import math
import copy
......@@ -31,8 +30,9 @@ import json_tricks
from nni.protocol import CommandType, send
from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.utils import extract_scalar_reward
from .. import parameter_expressions
from nni.common import init_logger
from nni.utils import NodeType, OptimizeMode, extract_scalar_reward
import nni.parameter_expressions as parameter_expressions
_logger = logging.getLogger(__name__)
......@@ -40,11 +40,6 @@ _next_parameter_id = 0
_KEY = 'TRIAL_BUDGET'
_epsilon = 1e-6
@unique
class OptimizeMode(Enum):
"""Oprimize Mode class"""
Minimize = 'minimize'
Maximize = 'maximize'
def create_parameter_id():
"""Create an id
......@@ -82,7 +77,7 @@ def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-
increased_id])
return params_id
def json2paramater(ss_spec, random_state):
def json2parameter(ss_spec, random_state):
"""Randomly generate values for hyperparameters from hyperparameter space i.e., x.
Parameters
......@@ -98,23 +93,23 @@ def json2paramater(ss_spec, random_state):
Parameters in this experiment
"""
if isinstance(ss_spec, dict):
if '_type' in ss_spec.keys():
_type = ss_spec['_type']
_value = ss_spec['_value']
if NodeType.TYPE in ss_spec.keys():
_type = ss_spec[NodeType.TYPE]
_value = ss_spec[NodeType.VALUE]
if _type == 'choice':
_index = random_state.randint(len(_value))
chosen_params = json2paramater(ss_spec['_value'][_index], random_state)
chosen_params = json2parameter(ss_spec[NodeType.VALUE][_index], random_state)
else:
chosen_params = eval('parameter_expressions.' + # pylint: disable=eval-used
_type)(*(_value + [random_state]))
else:
chosen_params = dict()
for key in ss_spec.keys():
chosen_params[key] = json2paramater(ss_spec[key], random_state)
chosen_params[key] = json2parameter(ss_spec[key], random_state)
elif isinstance(ss_spec, list):
chosen_params = list()
for _, subspec in enumerate(ss_spec):
chosen_params.append(json2paramater(subspec, random_state))
chosen_params.append(json2parameter(subspec, random_state))
else:
chosen_params = copy.deepcopy(ss_spec)
return chosen_params
......@@ -246,7 +241,7 @@ class Bracket():
hyperparameter_configs = dict()
for _ in range(num):
params_id = create_bracket_parameter_id(self.bracket_id, self.i)
params = json2paramater(searchspace_json, random_state)
params = json2parameter(searchspace_json, random_state)
params[_KEY] = r
hyperparameter_configs[params_id] = params
self._record_hyper_configs(hyperparameter_configs)
......
......@@ -17,39 +17,22 @@
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
"""
hyperopt_tuner.py
'''
"""
import copy
import logging
from enum import Enum, unique
import numpy as np
import hyperopt as hp
import numpy as np
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, split_index
logger = logging.getLogger('hyperopt_AutoML')
@unique
class OptimizeMode(Enum):
"""
Optimize Mode including Minimize and Maximize
"""
Minimize = 'minimize'
Maximize = 'maximize'
ROOT = 'root'
TYPE = '_type'
VALUE = '_value'
INDEX = '_index'
def json2space(in_x, name=ROOT):
def json2space(in_x, name=NodeType.ROOT):
"""
Change json to search space in hyperopt.
......@@ -58,16 +41,16 @@ def json2space(in_x, name=ROOT):
in_x : dict/list/str/int/float
The part of json.
name : str
name could be ROOT, TYPE, VALUE or INDEX.
name could be NodeType.ROOT, NodeType.TYPE, NodeType.VALUE or NodeType.INDEX, NodeType.NAME.
"""
out_y = copy.deepcopy(in_x)
if isinstance(in_x, dict):
if TYPE in in_x.keys():
_type = in_x[TYPE]
if NodeType.TYPE in in_x.keys():
_type = in_x[NodeType.TYPE]
name = name + '-' + _type
_value = json2space(in_x[VALUE], name=name)
_value = json2space(in_x[NodeType.VALUE], name=name)
if _type == 'choice':
out_y = eval('hp.hp.'+_type)(name, _value)
out_y = eval('hp.hp.choice')(name, _value)
else:
if _type in ['loguniform', 'qloguniform']:
_value[:2] = np.log(_value[:2])
......@@ -75,69 +58,92 @@ def json2space(in_x, name=ROOT):
else:
out_y = dict()
for key in in_x.keys():
out_y[key] = json2space(in_x[key], name+'[%s]' % str(key))
out_y[key] = json2space(in_x[key], name + '[%s]' % str(key))
elif isinstance(in_x, list):
out_y = list()
for i, x_i in enumerate(in_x):
out_y.append(json2space(x_i, name+'[%d]' % i))
else:
logger.info('in_x is not a dict or a list in json2space fuinction %s', str(in_x))
if isinstance(x_i, dict):
if NodeType.NAME not in x_i.keys():
raise RuntimeError(
'\'_name\' key is not found in this nested search space.'
)
out_y.append(json2space(x_i, name + '[%d]' % i))
return out_y
def json2parameter(in_x, parameter, name=ROOT):
def json2parameter(in_x, parameter, name=NodeType.ROOT):
"""
Change json to parameters.
"""
out_y = copy.deepcopy(in_x)
if isinstance(in_x, dict):
if TYPE in in_x.keys():
_type = in_x[TYPE]
if NodeType.TYPE in in_x.keys():
_type = in_x[NodeType.TYPE]
name = name + '-' + _type
if _type == 'choice':
_index = parameter[name]
out_y = {
INDEX: _index,
VALUE: json2parameter(in_x[VALUE][_index], parameter, name=name+'[%d]' % _index)
NodeType.INDEX:
_index,
NodeType.VALUE:
json2parameter(in_x[NodeType.VALUE][_index],
parameter,
name=name + '[%d]' % _index)
}
else:
out_y = parameter[name]
else:
out_y = dict()
for key in in_x.keys():
out_y[key] = json2parameter(
in_x[key], parameter, name + '[%s]' % str(key))
out_y[key] = json2parameter(in_x[key], parameter,
name + '[%s]' % str(key))
elif isinstance(in_x, list):
out_y = list()
for i, x_i in enumerate(in_x):
if isinstance(x_i, dict):
if NodeType.NAME not in x_i.keys():
raise RuntimeError(
'\'_name\' key is not found in this nested search space.'
)
out_y.append(json2parameter(x_i, parameter, name + '[%d]' % i))
else:
logger.info('in_x is not a dict or a list in json2space fuinction %s', str(in_x))
return out_y
def json2vals(in_x, vals, out_y, name=ROOT):
def json2vals(in_x, vals, out_y, name=NodeType.ROOT):
if isinstance(in_x, dict):
if TYPE in in_x.keys():
_type = in_x[TYPE]
if NodeType.TYPE in in_x.keys():
_type = in_x[NodeType.TYPE]
name = name + '-' + _type
try:
out_y[name] = vals[INDEX]
out_y[name] = vals[NodeType.INDEX]
# TODO - catch exact Exception
except Exception:
out_y[name] = vals
if _type == 'choice':
_index = vals[INDEX]
json2vals(in_x[VALUE][_index], vals[VALUE],
out_y, name=name + '[%d]' % _index)
_index = vals[NodeType.INDEX]
json2vals(in_x[NodeType.VALUE][_index],
vals[NodeType.VALUE],
out_y,
name=name + '[%d]' % _index)
else:
for key in in_x.keys():
json2vals(in_x[key], vals[key], out_y, name + '[%s]' % str(key))
json2vals(in_x[key], vals[key], out_y,
name + '[%s]' % str(key))
elif isinstance(in_x, list):
for i, temp in enumerate(in_x):
json2vals(temp, vals[i], out_y, name + '[%d]' % i)
# nested json
if isinstance(temp, dict):
if NodeType.NAME not in temp.keys():
raise RuntimeError(
'\'_name\' key is not found in this nested search space.'
)
else:
json2vals(temp, vals[i], out_y, name + '[%d]' % i)
else:
json2vals(temp, vals[i], out_y, name + '[%d]' % i)
def _add_index(in_x, parameter):
"""
......@@ -156,41 +162,36 @@ def _add_index(in_x, parameter):
value_type = in_x[TYPE]
value_format = in_x[VALUE]
if value_type == "choice":
choice_name = parameter[0] if isinstance(parameter, list) else parameter
for pos, item in enumerate(value_format): # here value_format is a list
if isinstance(item, list): # this format is ["choice_key", format_dict]
choice_name = parameter[0] if isinstance(parameter,
list) else parameter
for pos, item in enumerate(
value_format): # here value_format is a list
if isinstance(
item,
list): # this format is ["choice_key", format_dict]
choice_key = item[0]
choice_value_format = item[1]
if choice_key == choice_name:
return {INDEX: pos, VALUE: [choice_name, _add_index(choice_value_format, parameter[1])]}
return {
INDEX:
pos,
VALUE: [
choice_name,
_add_index(choice_value_format, parameter[1])
]
}
elif choice_name == item:
return {INDEX: pos, VALUE: item}
else:
return parameter
def _split_index(params):
"""
Delete index infromation from params
"""
if isinstance(params, list):
return [params[0], _split_index(params[1])]
elif isinstance(params, dict):
if INDEX in params.keys():
return _split_index(params[VALUE])
result = dict()
for key in params:
result[key] = _split_index(params[key])
return result
else:
return params
class HyperoptTuner(Tuner):
"""
HyperoptTuner is a tuner which using hyperopt algorithm.
"""
def __init__(self, algorithm_name, optimize_mode = 'minimize'):
def __init__(self, algorithm_name, optimize_mode='minimize'):
"""
Parameters
----------
......@@ -234,11 +235,16 @@ class HyperoptTuner(Tuner):
search_space_instance = json2space(self.json)
rstate = np.random.RandomState()
trials = hp.Trials()
domain = hp.Domain(None, search_space_instance,
domain = hp.Domain(None,
search_space_instance,
pass_expr_memo_ctrl=None)
algorithm = self._choose_tuner(self.algorithm_name)
self.rval = hp.FMinIter(algorithm, domain, trials,
max_evals=-1, rstate=rstate, verbose=0)
self.rval = hp.FMinIter(algorithm,
domain,
trials,
max_evals=-1,
rstate=rstate,
verbose=0)
self.rval.catch_eval_exceptions = False
def generate_parameters(self, parameter_id):
......@@ -259,7 +265,7 @@ class HyperoptTuner(Tuner):
# but it can cause deplicate parameter rarely
total_params = self.get_suggestion(random_search=True)
self.total_data[parameter_id] = total_params
params = _split_index(total_params)
params = split_index(total_params)
return params
def receive_trial_result(self, parameter_id, parameters, value):
......@@ -300,7 +306,7 @@ class HyperoptTuner(Tuner):
json2vals(self.json, vals, out_y)
vals = out_y
for key in domain.params:
if key in [VALUE, INDEX]:
if key in [NodeType.VALUE, NodeType.INDEX]:
continue
if key not in vals or vals[key] is None or vals[key] == []:
idxs[key] = vals[key] = []
......@@ -308,17 +314,23 @@ class HyperoptTuner(Tuner):
idxs[key] = [new_id]
vals[key] = [vals[key]]
self.miscs_update_idxs_vals(rval_miscs, idxs, vals,
self.miscs_update_idxs_vals(rval_miscs,
idxs,
vals,
idxs_map={new_id: new_id},
assert_all_vals_used=False)
trial = trials.new_trial_docs([new_id], rval_specs, rval_results, rval_miscs)[0]
trial = trials.new_trial_docs([new_id], rval_specs, rval_results,
rval_miscs)[0]
trial['result'] = {'loss': reward, 'status': 'ok'}
trial['state'] = hp.JOB_STATE_DONE
trials.insert_trial_docs([trial])
trials.refresh()
def miscs_update_idxs_vals(self, miscs, idxs, vals,
def miscs_update_idxs_vals(self,
miscs,
idxs,
vals,
assert_all_vals_used=True,
idxs_map=None):
"""
......@@ -368,9 +380,10 @@ class HyperoptTuner(Tuner):
algorithm = rval.algo
new_ids = rval.trials.new_trial_ids(1)
rval.trials.refresh()
random_state = rval.rstate.randint(2**31-1)
random_state = rval.rstate.randint(2**31 - 1)
if random_search:
new_trials = hp.rand.suggest(new_ids, rval.domain, trials, random_state)
new_trials = hp.rand.suggest(new_ids, rval.domain, trials,
random_state)
else:
new_trials = algorithm(new_ids, rval.domain, trials, random_state)
rval.trials.refresh()
......@@ -396,7 +409,8 @@ class HyperoptTuner(Tuner):
"""
_completed_num = 0
for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data)))
logger.info("Importing data, current processing progress %s / %s" %
(_completed_num, len(data)))
_completed_num += 1
if self.algorithm_name == 'random_search':
return
......@@ -405,10 +419,16 @@ class HyperoptTuner(Tuner):
assert "value" in trial_info
_value = trial_info['value']
if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value)
logger.info(
"Useless trial data, value is %s, skip this trial data." %
_value)
continue
self.supplement_data_num += 1
_parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
self.total_data[_parameter_id] = _add_index(in_x=self.json, parameter=_params)
self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
_parameter_id = '_'.join(
["ImportData", str(self.supplement_data_num)])
self.total_data[_parameter_id] = _add_index(in_x=self.json,
parameter=_params)
self.receive_trial_result(parameter_id=_parameter_id,
parameters=_params,
value=_value)
logger.info("Successfully import data to TPE/Anneal tuner.")
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
test_hyperopt_tuner.py
"""
from unittest import TestCase, main
import hyperopt as hp
from nni.hyperopt_tuner.hyperopt_tuner import json2space, json2parameter, json2vals
class HyperoptTunerTestCase(TestCase):
    """Unit tests for the hyperopt tuner's search-space conversion helpers
    (json2space / json2parameter / json2vals).
    """

    @staticmethod
    def _search_space():
        """Build the sample NNI search space shared by every test below."""
        return {
            "optimizer": {
                "_type": "choice",
                "_value": ["Adam", "SGD"]
            },
            "learning_rate": {
                "_type": "choice",
                "_value": [0.0001, 0.001, 0.002, 0.005, 0.01]
            }
        }

    def test_json2space(self):
        """json2space should turn each choice spec into a hyperopt Apply node."""
        converted = json2space(self._search_space())
        for field in ("optimizer", "learning_rate"):
            self.assertIsInstance(converted[field], hp.pyll.base.Apply)

    def test_json2parameter(self):
        """json2parameter should resolve sampled choice indices into values."""
        sampled = {
            'root[learning_rate]-choice': 2,
            'root[optimizer]-choice': 0
        }
        resolved = json2parameter(self._search_space(), sampled)
        self.assertEqual(resolved["optimizer"]["_index"], 0)
        self.assertEqual(resolved["optimizer"]["_value"], "Adam")
        self.assertEqual(resolved["learning_rate"]["_index"], 2)
        self.assertEqual(resolved["learning_rate"]["_value"], 0.002)

    def test_json2vals(self):
        """json2vals should flatten indexed values back into choice indices."""
        flattened = dict()
        indexed_vals = {
            'optimizer': {
                '_index': 0,
                '_value': 'Adam'
            },
            'learning_rate': {
                '_index': 1,
                '_value': 0.001
            }
        }
        json2vals(self._search_space(), indexed_vals, flattened)
        self.assertEqual(flattened["root[optimizer]-choice"], 0)
        self.assertEqual(flattened["root[learning_rate]-choice"], 1)


if __name__ == '__main__':
    main()
......@@ -38,17 +38,10 @@ import nni.metis_tuner.Regression_GP.OutlierDetection as gp_outlier_detection
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
import nni.metis_tuner.Regression_GP.Selection as gp_selection
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from nni.utils import OptimizeMode, extract_scalar_reward
logger = logging.getLogger("Metis_Tuner_AutoML")
@unique
class OptimizeMode(Enum):
"""
Optimize Mode class
"""
Minimize = 'minimize'
Maximize = 'maximize'
NONE_TYPE = ''
......
......@@ -23,10 +23,10 @@ import os
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from nni.utils import OptimizeMode, extract_scalar_reward
from nni.networkmorphism_tuner.bayesian import BayesianOptimizer
from nni.networkmorphism_tuner.nn import CnnGenerator, MlpGenerator
from nni.networkmorphism_tuner.utils import Constant, OptimizeMode
from nni.networkmorphism_tuner.utils import Constant
from nni.networkmorphism_tuner.graph import graph_to_json, json_to_graph
......
......@@ -18,16 +18,6 @@
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
from enum import Enum, unique
@unique
class OptimizeMode(Enum):
"""
Oprimize Mode class
"""
Minimize = "minimize"
Maximize = "maximize"
class Constant:
'''Constant for the Tuner.
......
......@@ -22,7 +22,7 @@ smac_tuner.py
"""
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from nni.utils import OptimizeMode, extract_scalar_reward
import sys
import logging
......@@ -37,11 +37,6 @@ from smac.facade.smac_facade import SMAC
from smac.facade.roar_facade import ROAR
from smac.facade.epils_facade import EPILS
@unique
class OptimizeMode(Enum):
"""Oprimize Mode class"""
Minimize = 'minimize'
Maximize = 'maximize'
class SMACTuner(Tuner):
"""
......
......@@ -17,11 +17,54 @@
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
"""
utils.py
"""
import os
from enum import Enum, unique
from .common import init_logger
from .env_vars import dispatcher_env_vars
@unique
class OptimizeMode(Enum):
    """Direction in which a tuner drives the trial reward.

    ``Minimize`` means the tuner tries to minimize the reward received
    from a trial; ``Maximize`` means it tries to maximize that reward.
    """
    Minimize = 'minimize'
    Maximize = 'maximize'
class NodeType:
    """String keys used to tag nodes in a search-space / parameter tree."""
    ROOT = 'root'     # label of the tree's root node
    TYPE = '_type'    # kind of sampling spec (e.g. "choice")
    VALUE = '_value'  # candidate values, or the chosen value
    INDEX = '_index'  # index of the chosen candidate
    NAME = '_name'    # name of a nested sub-space
def split_index(params):
    """
    Strip index information from ``params``.

    Recursively walks the structure; wherever a dict carries an
    ``_index`` key it is collapsed to its ``_value`` payload, so the
    caller receives plain parameter values.
    """
    if not isinstance(params, dict):
        return params
    if NodeType.INDEX in params:
        return split_index(params[NodeType.VALUE])
    return {key: split_index(value) for key, value in params.items()}
def extract_scalar_reward(value, scalar_key='default'):
"""
Extract scalar reward from trial result.
......
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
from unittest import TestCase, main
import nni
from nni.utils import split_index
class UtilsTestCase(TestCase):
    """Unit tests for nni.utils.split_index."""

    def test_split_index_normal(self):
        """A flat space: every _index/_value pair collapses to the value."""
        params_with_index = {
            "dropout_rate": {"_index": 1, "_value": 0.9},
            "hidden_size": {"_index": 1, "_value": 512}
        }
        expected = {
            "dropout_rate": 0.9,
            "hidden_size": 512
        }
        self.assertEqual(split_index(params_with_index), expected)

    def test_split_index_nested(self):
        """A nested space: _name keys survive while indexed leaves collapse."""
        params_with_index = {
            "layer0": {
                "_name": "Avg_pool",
                "pooling_size": {"_index": 1, "_value": 2}
            },
            "layer1": {
                "_name": "Empty"
            },
            "layer2": {
                "_name": "Max_pool",
                "pooling_size": {"_index": 2, "_value": 3}
            },
            "layer3": {
                "_name": "Conv",
                "kernel_size": {"_index": 3, "_value": 5},
                "output_filters": {"_index": 3, "_value": 64}
            }
        }
        expected = {
            "layer0": {"_name": "Avg_pool", "pooling_size": 2},
            "layer1": {"_name": "Empty"},
            "layer2": {"_name": "Max_pool", "pooling_size": 3},
            "layer3": {"_name": "Conv", "kernel_size": 5, "output_filters": 64}
        }
        self.assertEqual(split_index(params_with_index), expected)


if __name__ == '__main__':
    main()
\ No newline at end of file
......@@ -23,7 +23,8 @@ interface TrialDetailState {
experimentStatus: string;
experimentPlatform: string;
experimentLogCollection: boolean;
entriesTable: number;
entriesTable: number; // table components val
entriesInSelect: string;
searchSpace: string;
isMultiPhase: boolean;
}
......@@ -68,6 +69,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
experimentPlatform: '',
experimentLogCollection: false,
entriesTable: 20,
entriesInSelect: '20',
isHasSearch: false,
searchSpace: '',
isMultiPhase: false
......@@ -82,7 +84,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
axios.get(`${MANAGER_IP}/metric-data`)
])
.then(axios.spread((res, res1) => {
if (res.status === 200) {
if (res.status === 200 && res1.status === 200) {
const trialJobs = res.data;
const metricSource = res1.data;
const trialTable: Array<TableObj> = [];
......@@ -149,7 +151,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
});
});
// update search data result
const { searchResultSource } = this.state;
const { searchResultSource, entriesInSelect } = this.state;
if (searchResultSource.length !== 0) {
const temp: Array<number> = [];
Object.keys(searchResultSource).map(index => {
......@@ -176,6 +178,11 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
tableListSource: trialTable
}));
}
if (entriesInSelect === 'all' && this._isMounted) {
this.setState(() => ({
entriesTable: trialTable.length
}));
}
}
}));
}
......@@ -198,7 +205,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
const item = tableListSource[key];
if (item.sequenceId.toString() === targetValue
|| item.id.includes(targetValue)
|| item.status.includes(targetValue)
|| item.status.toUpperCase().includes(targetValue.toUpperCase())
) {
searchResultList.push(item);
}
......@@ -244,7 +251,12 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
break;
case 'all':
const { tableListSource } = this.state;
this.setState(() => ({ entriesTable: tableListSource.length }));
if (this._isMounted) {
this.setState(() => ({
entriesInSelect: 'all',
entriesTable: tableListSource.length
}));
}
break;
default:
}
......@@ -270,7 +282,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
const logCollection = res.data.params.logCollection;
let expLogCollection: boolean = false;
const isMultiy: boolean = res.data.params.multiPhase !== undefined
? res.data.params.multiPhase : false;
? res.data.params.multiPhase : false;
if (logCollection !== undefined && logCollection !== 'none') {
expLogCollection = true;
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment