Unverified Commit e1a4a80a authored by SparkSnail's avatar SparkSnail Committed by GitHub
Browse files

Merge pull request #175 from microsoft/master

merge master
parents bee8f84e e267a737
...@@ -31,7 +31,7 @@ import json_tricks ...@@ -31,7 +31,7 @@ import json_tricks
from nni.protocol import CommandType, send from nni.protocol import CommandType, send
from nni.msg_dispatcher_base import MsgDispatcherBase from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.common import init_logger from nni.common import init_logger
from nni.utils import NodeType, OptimizeMode, extract_scalar_reward from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, randint_to_quniform
import nni.parameter_expressions as parameter_expressions import nni.parameter_expressions as parameter_expressions
_logger = logging.getLogger(__name__) _logger = logging.getLogger(__name__)
...@@ -357,6 +357,7 @@ class Hyperband(MsgDispatcherBase): ...@@ -357,6 +357,7 @@ class Hyperband(MsgDispatcherBase):
number of trial jobs number of trial jobs
""" """
self.searchspace_json = data self.searchspace_json = data
randint_to_quniform(self.searchspace_json)
self.random_state = np.random.RandomState() self.random_state = np.random.RandomState()
def handle_trial_end(self, data): def handle_trial_end(self, data):
......
...@@ -27,7 +27,7 @@ import logging ...@@ -27,7 +27,7 @@ import logging
import hyperopt as hp import hyperopt as hp
import numpy as np import numpy as np
from nni.tuner import Tuner from nni.tuner import Tuner
from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, split_index from nni.utils import NodeType, OptimizeMode, extract_scalar_reward, split_index, randint_to_quniform
logger = logging.getLogger('hyperopt_AutoML') logger = logging.getLogger('hyperopt_AutoML')
...@@ -231,6 +231,8 @@ class HyperoptTuner(Tuner): ...@@ -231,6 +231,8 @@ class HyperoptTuner(Tuner):
search_space : dict search_space : dict
""" """
self.json = search_space self.json = search_space
randint_to_quniform(self.json)
search_space_instance = json2space(self.json) search_space_instance = json2space(self.json)
rstate = np.random.RandomState() rstate = np.random.RandomState()
trials = hp.Trials() trials = hp.Trials()
......
...@@ -133,7 +133,7 @@ class MetisTuner(Tuner): ...@@ -133,7 +133,7 @@ class MetisTuner(Tuner):
self.x_bounds[idx] = bounds self.x_bounds[idx] = bounds
self.x_types[idx] = 'discrete_int' self.x_types[idx] = 'discrete_int'
elif key_type == 'randint': elif key_type == 'randint':
self.x_bounds[idx] = [0, key_range[0]] self.x_bounds[idx] = [key_range[0], key_range[1]]
self.x_types[idx] = 'range_int' self.x_types[idx] = 'range_int'
elif key_type == 'uniform': elif key_type == 'uniform':
self.x_bounds[idx] = [key_range[0], key_range[1]] self.x_bounds[idx] = [key_range[0], key_range[1]]
......
...@@ -37,6 +37,9 @@ from ConfigSpaceNNI import Configuration ...@@ -37,6 +37,9 @@ from ConfigSpaceNNI import Configuration
from .convert_ss_to_scenario import generate_scenario from .convert_ss_to_scenario import generate_scenario
from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward, randint_to_quniform
class SMACTuner(Tuner): class SMACTuner(Tuner):
""" """
...@@ -136,6 +139,7 @@ class SMACTuner(Tuner): ...@@ -136,6 +139,7 @@ class SMACTuner(Tuner):
search_space: search_space:
search space search space
""" """
randint_to_quniform(search_space)
if not self.update_ss_done: if not self.update_ss_done:
self.categorical_dict = generate_scenario(search_space) self.categorical_dict = generate_scenario(search_space)
if self.categorical_dict is None: if self.categorical_dict is None:
......
...@@ -36,7 +36,8 @@ __all__ = [ ...@@ -36,7 +36,8 @@ __all__ = [
'qnormal', 'qnormal',
'lognormal', 'lognormal',
'qlognormal', 'qlognormal',
'function_choice' 'function_choice',
'mutable_layer'
] ]
...@@ -78,6 +79,9 @@ if trial_env_vars.NNI_PLATFORM is None: ...@@ -78,6 +79,9 @@ if trial_env_vars.NNI_PLATFORM is None:
def function_choice(*funcs, name=None): def function_choice(*funcs, name=None):
return random.choice(funcs)() return random.choice(funcs)()
def mutable_layer(*args, **kwargs):
    """Standalone-mode stub for nni.mutable_layer (NNI_PLATFORM unset).

    Accepts any arguments so that code written against the platform-mode
    signature (mutable_id, mutable_layer_id, funcs, ...) reaches this
    deliberate RuntimeError instead of failing with a TypeError about
    unexpected arguments.

    Raises
    ------
    RuntimeError
        Always -- mutable layers cannot be evaluated outside an NNI trial.
    """
    raise RuntimeError('Cannot call nni.mutable_layer in this mode')
else: else:
def choice(options, name=None, key=None): def choice(options, name=None, key=None):
...@@ -113,6 +117,42 @@ else: ...@@ -113,6 +117,42 @@ else:
def function_choice(funcs, name=None, key=None): def function_choice(funcs, name=None, key=None):
return funcs[_get_param(key)]() return funcs[_get_param(key)]()
def mutable_layer(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size=0):
    '''Execute the layer function and inputs chosen by the tuner.

    The tuner's decision for this trial has the shape:
        {
            "mutable_id": {
                "mutable_layer_id": {
                    "chosen_layer": "pool",
                    "chosen_inputs": ["out1", "out3"]
                }
            }
        }

    Parameters
    ---------------
    mutable_id: name of this mutable_layer block (which may hold several mutable layers)
    mutable_layer_id: name of one mutable layer inside the block
    funcs: dict mapping candidate-layer names to callables
    funcs_args: dict mapping candidate-layer names to their argument lists
    fixed_inputs: inputs always fed to the chosen function
    optional_inputs: dict of candidate inputs, keyed by name
    optional_input_size: number of candidate inputs to be chosen
    '''
    decision = _get_param(mutable_id)[mutable_layer_id]
    layer_name = decision["chosen_layer"]
    picked_inputs = [optional_inputs[name] for name in decision["chosen_inputs"]]
    # the chosen callable receives [fixed, chosen] inputs plus its own args
    return funcs[layer_name]([fixed_inputs, picked_inputs], *funcs_args[layer_name])
def _get_param(key): def _get_param(key):
if trial._params is None: if trial._params is None:
trial.get_next_parameter() trial.get_next_parameter()
......
...@@ -40,6 +40,7 @@ class OptimizeMode(Enum): ...@@ -40,6 +40,7 @@ class OptimizeMode(Enum):
Minimize = 'minimize' Minimize = 'minimize'
Maximize = 'maximize' Maximize = 'maximize'
class NodeType: class NodeType:
"""Node Type class """Node Type class
""" """
...@@ -83,6 +84,7 @@ def extract_scalar_reward(value, scalar_key='default'): ...@@ -83,6 +84,7 @@ def extract_scalar_reward(value, scalar_key='default'):
raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.') raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
return reward return reward
def convert_dict2tuple(value): def convert_dict2tuple(value):
""" """
convert dict type to tuple to solve unhashable problem. convert dict type to tuple to solve unhashable problem.
...@@ -94,9 +96,30 @@ def convert_dict2tuple(value): ...@@ -94,9 +96,30 @@ def convert_dict2tuple(value):
else: else:
return value return value
def init_dispatcher_logger(): def init_dispatcher_logger():
""" Initialize dispatcher logging configuration""" """ Initialize dispatcher logging configuration"""
logger_file_path = 'dispatcher.log' logger_file_path = 'dispatcher.log'
if dispatcher_env_vars.NNI_LOG_DIRECTORY is not None: if dispatcher_env_vars.NNI_LOG_DIRECTORY is not None:
logger_file_path = os.path.join(dispatcher_env_vars.NNI_LOG_DIRECTORY, logger_file_path) logger_file_path = os.path.join(dispatcher_env_vars.NNI_LOG_DIRECTORY, logger_file_path)
init_logger(logger_file_path, dispatcher_env_vars.NNI_LOG_LEVEL) init_logger(logger_file_path, dispatcher_env_vars.NNI_LOG_LEVEL)
def randint_to_quniform(in_x):
    """Recursively rewrite every 'randint' node of a search space to 'quniform'.

    Mutates ``in_x`` in place: a node ``{_type: 'randint', _value: [low, high]}``
    becomes ``{_type: 'quniform', _value: [low, high, 1]}`` (step size 1), so
    tuners without native randint support can still sample it. 'choice' nodes
    and plain dicts/lists are descended into; any other value is left as-is.

    Parameters
    ----------
    in_x : dict or list
        A search space or any sub-tree of one.
    """
    if isinstance(in_x, dict):
        if NodeType.TYPE in in_x:
            if in_x[NodeType.TYPE] == 'randint':
                # append the step so [low, high] becomes [low, high, 1]
                in_x[NodeType.VALUE].append(1)
                in_x[NodeType.TYPE] = 'quniform'
            elif in_x[NodeType.TYPE] == 'choice':
                # nested search spaces may hide inside choice options
                randint_to_quniform(in_x[NodeType.VALUE])
        else:
            for value in in_x.values():
                randint_to_quniform(value)
    elif isinstance(in_x, list):
        for item in in_x:
            randint_to_quniform(item)
...@@ -25,6 +25,94 @@ from nni_cmd.common_utils import print_warning ...@@ -25,6 +25,94 @@ from nni_cmd.common_utils import print_warning
# pylint: disable=unidiomatic-typecheck # pylint: disable=unidiomatic-typecheck
def parse_annotation_mutable_layers(code, lineno):
    """Parse a '@nni.mutable_layers(...)' annotation string.

    code: annotation string (excluding '@')
    lineno: source line number of the annotation, used to build a unique block id

    Returns a list of ast.Assign nodes, one per mutable layer, each of the form
    `<layer_output> = nni.mutable_layer(<block id>, <layer id>, funcs,
    funcs_args, fixed_inputs, optional_inputs, optional_input_size)`.
    """
    module = ast.parse(code)
    assert type(module) is ast.Module, 'internal error #1'
    assert len(module.body) == 1, 'Annotation mutable_layers contains more than one expression'
    assert type(module.body[0]) is ast.Expr, 'Annotation is not expression'
    call = module.body[0].value
    nodes = []
    # block id is derived from the line number, so each annotation is unique
    mutable_id = 'mutable_block_' + str(lineno)
    mutable_layer_cnt = 0
    for arg in call.args:
        # each positional arg is a dict literal describing one mutable layer;
        # the flags record which fields were seen, to reject duplicates
        fields = {'layer_choice': False,
                  'fixed_inputs': False,
                  'optional_inputs': False,
                  'optional_input_size': False,
                  'layer_output': False}
        for k, value in zip(arg.keys, arg.values):
            if k.id == 'layer_choice':
                assert not fields['layer_choice'], 'Duplicated field: layer_choice'
                assert type(value) is ast.List, 'Value of layer_choice should be a list'
                call_funcs_keys = []
                call_funcs_values = []
                call_kwargs_values = []
                # NOTE(review): this loop variable shadows the outer `call`;
                # harmless here because the outer iterator was already created.
                for call in value.elts:
                    assert type(call) is ast.Call, 'Element in layer_choice should be function call'
                    # the candidate's source text (e.g. "conv(size=3)") is its key
                    call_name = astor.to_source(call).strip()
                    call_funcs_keys.append(ast.Str(s=call_name))
                    call_funcs_values.append(call.func)
                    assert not call.args, 'Number of args without keyword should be zero'
                    kw_args = []
                    kw_values = []
                    for kw in call.keywords:
                        # NOTE(review): kw.arg is a plain str, not an AST node —
                        # confirm downstream consumers accept str dict keys
                        kw_args.append(kw.arg)
                        kw_values.append(kw.value)
                    call_kwargs_values.append(ast.Dict(keys=kw_args, values=kw_values))
                # funcs: {candidate source text -> function reference}
                # kwargs: {candidate source text -> that call's keyword args}
                call_funcs = ast.Dict(keys=call_funcs_keys, values=call_funcs_values)
                call_kwargs = ast.Dict(keys=call_funcs_keys, values=call_kwargs_values)
                fields['layer_choice'] = True
            elif k.id == 'fixed_inputs':
                assert not fields['fixed_inputs'], 'Duplicated field: fixed_inputs'
                assert type(value) is ast.List, 'Value of fixed_inputs should be a list'
                fixed_inputs = value
                fields['fixed_inputs'] = True
            elif k.id == 'optional_inputs':
                assert not fields['optional_inputs'], 'Duplicated field: optional_inputs'
                assert type(value) is ast.List, 'Value of optional_inputs should be a list'
                # map each candidate input's source text to the expression itself
                var_names = [ast.Str(s=astor.to_source(var).strip()) for var in value.elts]
                optional_inputs = ast.Dict(keys=var_names, values=value.elts)
                fields['optional_inputs'] = True
            elif k.id == 'optional_input_size':
                assert not fields['optional_input_size'], 'Duplicated field: optional_input_size'
                assert type(value) is ast.Num, 'Value of optional_input_size should be a number'
                optional_input_size = value
                fields['optional_input_size'] = True
            elif k.id == 'layer_output':
                assert not fields['layer_output'], 'Duplicated field: layer_output'
                assert type(value) is ast.Name, 'Value of layer_output should be ast.Name type'
                layer_output = value
                fields['layer_output'] = True
            else:
                raise AssertionError('Unexpected field in mutable layer')
        # make call for this mutable layer
        assert fields['layer_choice'], 'layer_choice must exist'
        assert fields['layer_output'], 'layer_output must exist'
        mutable_layer_id = 'mutable_layer_' + str(mutable_layer_cnt)
        mutable_layer_cnt += 1
        # build the `nni.mutable_layer(...)` call node
        target_call_attr = ast.Attribute(value=ast.Name(id='nni', ctx=ast.Load()), attr='mutable_layer', ctx=ast.Load())
        target_call_args = [ast.Str(s=mutable_id),
                            ast.Str(s=mutable_layer_id),
                            call_funcs,
                            call_kwargs]
        if fields['fixed_inputs']:
            target_call_args.append(fixed_inputs)
        else:
            # absent fixed_inputs is passed as a literal None
            target_call_args.append(ast.NameConstant(value=None))
        if fields['optional_inputs']:
            target_call_args.append(optional_inputs)
            assert fields['optional_input_size'], 'optional_input_size must exist when optional_inputs exists'
            target_call_args.append(optional_input_size)
        else:
            # absent optional_inputs: pass None and rely on the callee's
            # optional_input_size default (the size argument is omitted)
            target_call_args.append(ast.NameConstant(value=None))
        target_call = ast.Call(func=target_call_attr, args=target_call_args, keywords=[])
        # assign the generated call's result to the declared output variable
        node = ast.Assign(targets=[layer_output], value=target_call)
        nodes.append(node)
    return nodes
def parse_annotation(code): def parse_annotation(code):
"""Parse an annotation string. """Parse an annotation string.
...@@ -235,6 +323,9 @@ class Transformer(ast.NodeTransformer): ...@@ -235,6 +323,9 @@ class Transformer(ast.NodeTransformer):
or string.startswith('@nni.get_next_parameter('): or string.startswith('@nni.get_next_parameter('):
return parse_annotation(string[1:]) # expand annotation string to code return parse_annotation(string[1:]) # expand annotation string to code
if string.startswith('@nni.mutable_layers('):
return parse_annotation_mutable_layers(string[1:], node.lineno)
if string.startswith('@nni.variable(') \ if string.startswith('@nni.variable(') \
or string.startswith('@nni.function_choice('): or string.startswith('@nni.function_choice('):
self.stack[-1] = string[1:] # mark that the next expression is annotated self.stack[-1] = string[1:] # mark that the next expression is annotated
......
...@@ -38,7 +38,8 @@ _ss_funcs = [ ...@@ -38,7 +38,8 @@ _ss_funcs = [
'qnormal', 'qnormal',
'lognormal', 'lognormal',
'qlognormal', 'qlognormal',
'function_choice' 'function_choice',
'mutable_layer'
] ]
...@@ -50,6 +51,18 @@ class SearchSpaceGenerator(ast.NodeTransformer): ...@@ -50,6 +51,18 @@ class SearchSpaceGenerator(ast.NodeTransformer):
self.search_space = {} self.search_space = {}
self.last_line = 0 # last parsed line, useful for error reporting self.last_line = 0 # last parsed line, useful for error reporting
def generate_mutable_layer_search_space(self, args):
    """Record one generated nni.mutable_layer(...) call into self.search_space.

    args holds the call's positional AST arguments:
    [0] block name (Str), [1] layer name (Str), [2] funcs dict,
    [3] funcs kwargs dict, [4] fixed_inputs (or None),
    [5] optional_inputs dict (or NameConstant None, or absent),
    [6] optional_input_size Num (or absent).
    """
    mutable_block = args[0].s
    mutable_layer = args[1].s
    if mutable_block not in self.search_space:
        self.search_space[mutable_block] = dict()
    # When the annotation omitted optional_inputs, the generated call carries
    # ast.NameConstant(None) (or drops the size argument entirely), so probe
    # the node shape instead of assuming ast.Dict / ast.Num — the original
    # code raised AttributeError/IndexError in that case.
    optional_inputs = []
    optional_input_size = 0
    if len(args) > 5 and hasattr(args[5], 'keys'):
        optional_inputs = [key.s for key in args[5].keys]
    if len(args) > 6 and hasattr(args[6], 'n'):
        optional_input_size = args[6].n
    self.search_space[mutable_block][mutable_layer] = {
        'layer_choice': [key.s for key in args[2].keys],
        'optional_inputs': optional_inputs,
        'optional_input_size': optional_input_size
    }
def visit_Call(self, node): # pylint: disable=invalid-name def visit_Call(self, node): # pylint: disable=invalid-name
self.generic_visit(node) self.generic_visit(node)
...@@ -68,6 +81,10 @@ class SearchSpaceGenerator(ast.NodeTransformer): ...@@ -68,6 +81,10 @@ class SearchSpaceGenerator(ast.NodeTransformer):
self.last_line = node.lineno self.last_line = node.lineno
if func == 'mutable_layer':
self.generate_mutable_layer_search_space(node.args)
return node
if node.keywords: if node.keywords:
# there is a `name` argument # there is a `name` argument
assert len(node.keywords) == 1, 'Smart parameter has keyword argument other than "name"' assert len(node.keywords) == 1, 'Smart parameter has keyword argument other than "name"'
......
import time
def add_one(inputs):
    """Candidate layer op for the demo: add 1 to its input."""
    result = inputs + 1
    return result
def add_two(inputs):
    """Candidate layer op for the demo: add 2 to its input."""
    result = inputs + 2
    return result
def add_three(inputs):
    """Candidate layer op for the demo: add 3 to its input."""
    result = inputs + 3
    return result
def add_four(inputs):
    """Candidate layer op for the demo: add 4 to its input."""
    result = inputs + 4
    return result
def main():
    """Demo of NNI mutable-layer annotations.

    The string literals below are NNI annotations: the annotation tool
    rewrites each @nni.mutable_layers block into nni.mutable_layer(...)
    calls that run one tuner-chosen add_* function per layer, binding the
    layer_output names (layer_1_out, ...). Run unprocessed, the strings are
    no-ops and those names stay undefined.
    """
    images = 5
    # three stacked mutable layers; later layers may take earlier outputs as
    # optional inputs — do not edit these strings, the parser matches them
    """@nni.mutable_layers(
    {
        layer_choice: [add_one(), add_two(), add_three(), add_four()],
        optional_inputs: [images],
        optional_input_size: 1,
        layer_output: layer_1_out
    },
    {
        layer_choice: [add_one(), add_two(), add_three(), add_four()],
        optional_inputs: [layer_1_out],
        optional_input_size: 1,
        layer_output: layer_2_out
    },
    {
        layer_choice: [add_one(), add_two(), add_three(), add_four()],
        optional_inputs: [layer_1_out, layer_2_out],
        optional_input_size: 1,
        layer_output: layer_3_out
    }
    )"""
    # report each layer's output as an intermediate metric, pausing between
    # reports so the experiment UI shows a progression
    """@nni.report_intermediate_result(layer_1_out)"""
    time.sleep(2)
    """@nni.report_intermediate_result(layer_2_out)"""
    time.sleep(2)
    """@nni.report_intermediate_result(layer_3_out)"""
    time.sleep(2)
    layer_3_out = layer_3_out + 10
    """@nni.report_final_result(layer_3_out)"""
# Script entry point: run the annotated demo trial when executed directly.
if __name__ == '__main__':
    main()
...@@ -23,7 +23,6 @@ import os ...@@ -23,7 +23,6 @@ import os
import json import json
import shutil import shutil
from .constants import NNICTL_HOME_DIR from .constants import NNICTL_HOME_DIR
from .common_utils import print_error
class Config: class Config:
'''a util class to load and save config''' '''a util class to load and save config'''
...@@ -121,25 +120,3 @@ class Experiments: ...@@ -121,25 +120,3 @@ class Experiments:
except ValueError: except ValueError:
return {} return {}
return {} return {}
class HDFSConfig:
    '''Load and store the HDFS account settings used by nnictl.'''

    def __init__(self):
        # keep the config file alongside the other nnictl state files
        os.makedirs(NNICTL_HOME_DIR, exist_ok=True)
        self.hdfs_config_file = os.path.join(NNICTL_HOME_DIR, '.hdfs')

    def get_config(self):
        '''Return the saved config dict, or None when absent or unreadable.'''
        if not os.path.exists(self.hdfs_config_file):
            return None
        try:
            with open(self.hdfs_config_file, 'r') as file:
                return json.load(file)
        except Exception as exception:
            # report the problem but fall back to "no configuration"
            print_error(exception)
            return None

    def set_config(self, host, user_name):
        '''Persist the HDFS host and user name as JSON.'''
        payload = {'host': host, 'userName': user_name}
        with open(self.hdfs_config_file, 'w') as file:
            json.dump(payload, file)
...@@ -125,8 +125,7 @@ def start_rest_server(port, platform, mode, config_file_name, experiment_id=None ...@@ -125,8 +125,7 @@ def start_rest_server(port, platform, mode, config_file_name, experiment_id=None
if mode == 'resume': if mode == 'resume':
cmds += ['--experiment_id', experiment_id] cmds += ['--experiment_id', experiment_id]
stdout_full_path, stderr_full_path = get_log_path(config_file_name) stdout_full_path, stderr_full_path = get_log_path(config_file_name)
stdout_file = open(stdout_full_path, 'a+') with open(stdout_full_path, 'a+') as stdout_file, open(stderr_full_path, 'a+') as stderr_file:
stderr_file = open(stderr_full_path, 'a+')
time_now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) time_now = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
#add time information in the header of log files #add time information in the header of log files
log_header = LOG_HEADER % str(time_now) log_header = LOG_HEADER % str(time_now)
......
...@@ -194,15 +194,6 @@ def parse_args(): ...@@ -194,15 +194,6 @@ def parse_args():
'the unit is second') 'the unit is second')
parser_top.set_defaults(func=monitor_experiment) parser_top.set_defaults(func=monitor_experiment)
parser_hdfs = subparsers.add_parser('hdfs', help='monitor hdfs files')
parser_hdfs_subparsers = parser_hdfs.add_subparsers()
parser_hdfs_set = parser_hdfs_subparsers.add_parser('set', help='set the host and userName of hdfs')
parser_hdfs_set.add_argument('--host', required=True, dest='host', help='the host of hdfs')
parser_hdfs_set.add_argument('--user_name', required=True, dest='user_name', help='the userName of hdfs')
parser_hdfs_set.set_defaults(func=hdfs_set)
parser_hdfs_list = parser_hdfs_subparsers.add_parser('clean', help='clean hdfs files')
parser_hdfs_list.set_defaults(func=hdfs_clean)
args = parser.parse_args() args = parser.parse_args()
args.func(args) args.func(args)
......
...@@ -27,8 +27,7 @@ import time ...@@ -27,8 +27,7 @@ import time
from subprocess import call, check_output from subprocess import call, check_output
from .rest_utils import rest_get, rest_delete, check_rest_server_quick, check_response from .rest_utils import rest_get, rest_delete, check_rest_server_quick, check_response
from .url_utils import trial_jobs_url, experiment_url, trial_job_id_url, export_data_url from .url_utils import trial_jobs_url, experiment_url, trial_job_id_url, export_data_url
from pyhdfs import HdfsClient, HdfsFileNotFoundException from .config_utils import Config, Experiments
from .config_utils import Config, Experiments, HDFSConfig
from .constants import NNICTL_HOME_DIR, EXPERIMENT_INFORMATION_FORMAT, EXPERIMENT_DETAIL_FORMAT, \ from .constants import NNICTL_HOME_DIR, EXPERIMENT_INFORMATION_FORMAT, EXPERIMENT_DETAIL_FORMAT, \
EXPERIMENT_MONITOR_INFO, TRIAL_MONITOR_HEAD, TRIAL_MONITOR_CONTENT, TRIAL_MONITOR_TAIL, REST_TIME_OUT EXPERIMENT_MONITOR_INFO, TRIAL_MONITOR_HEAD, TRIAL_MONITOR_CONTENT, TRIAL_MONITOR_TAIL, REST_TIME_OUT
from .common_utils import print_normal, print_error, print_warning, detect_process from .common_utils import print_normal, print_error, print_warning, detect_process
...@@ -487,35 +486,3 @@ def export_trials_data(args): ...@@ -487,35 +486,3 @@ def export_trials_data(args):
print_error('Export failed...') print_error('Export failed...')
else: else:
print_error('Restful server is not Running') print_error('Restful server is not Running')
\ No newline at end of file
def hdfs_set(args):
    '''Store the HDFS host and user name given on the command line.'''
    config = HDFSConfig()
    config.set_config(args.host, args.user_name)
    print_normal('HDFS account update success!')
def hdfs_clean(args):
    '''Interactively delete all NNI experiment files on the configured HDFS.

    Requires the account to have been stored via `nnictl hdfs set` first;
    prompts for confirmation, then removes every entry under
    /<user>/nni/experiments on the remote filesystem.
    '''
    hdfsConfig = HDFSConfig()
    if not hdfsConfig.get_config():
        print_error('Please use \'nnictl hdfs set\' command to set hdfs account first!')
        exit(1)
    host = hdfsConfig.get_config().get('host')
    user_name = hdfsConfig.get_config().get('userName')
    # WebHDFS REST endpoint on port 80; short timeout so a bad host fails fast
    hdfs_client = HdfsClient(hosts='{0}:80'.format(host), user_name=user_name, webhdfs_path='/webhdfs/api/v1', timeout=5)
    root_path = os.path.join('/', user_name, 'nni', 'experiments')
    # loop until the user gives a definite yes/no answer
    while True:
        inputs = input('INFO: clean up all files in {0}, do you want to continue?[Y/N]:'.format(root_path))
        if inputs.lower() not in ['y', 'n', 'yes', 'no']:
            print_warning('please input Y or N!')
        elif inputs.lower() in ['n', 'no']:
            exit(0)
        else:
            break
    path_list = hdfs_client.listdir(root_path)
    for path in path_list:
        full_path = os.path.join(root_path, path)
        print_normal('deleting {0}'.format(full_path))
        # recursive=True removes the whole experiment directory tree
        if hdfs_client.delete(full_path, recursive=True):
            print_normal('delete success!')
        else:
            print_normal('delete failed!')
    print_normal('DONE')
...@@ -94,9 +94,7 @@ def start_tensorboard_process(args, nni_config, path_list, temp_nni_path): ...@@ -94,9 +94,7 @@ def start_tensorboard_process(args, nni_config, path_list, temp_nni_path):
if detect_port(args.port): if detect_port(args.port):
print_error('Port %s is used by another process, please reset port!' % str(args.port)) print_error('Port %s is used by another process, please reset port!' % str(args.port))
exit(1) exit(1)
with open(os.path.join(temp_nni_path, 'tensorboard_stdout'), 'a+') as stdout_file, open(os.path.join(temp_nni_path, 'tensorboard_stderr'), 'a+') as stderr_file:
stdout_file = open(os.path.join(temp_nni_path, 'tensorboard_stdout'), 'a+')
stderr_file = open(os.path.join(temp_nni_path, 'tensorboard_stderr'), 'a+')
cmds = ['tensorboard', '--logdir', format_tensorboard_log_path(path_list), '--port', str(args.port)] cmds = ['tensorboard', '--logdir', format_tensorboard_log_path(path_list), '--port', str(args.port)]
tensorboard_process = Popen(cmds, stdout=stdout_file, stderr=stderr_file) tensorboard_process = Popen(cmds, stdout=stdout_file, stderr=stderr_file)
url_list = get_local_urls(args.port) url_list = get_local_urls(args.port)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment