Unverified Commit 704b50e2 authored by SparkSnail's avatar SparkSnail Committed by GitHub
Browse files

Merge pull request #200 from microsoft/master

merge master
parents 755ac5f0 3a6d1372
......@@ -91,7 +91,10 @@ def json2parameter(in_x, parameter, name=NodeType.ROOT):
name=name + '[%d]' % _index)
}
else:
out_y = parameter[name]
if _type in ['quniform', 'qloguniform']:
out_y = np.clip(parameter[name], in_x[NodeType.VALUE][0], in_x[NodeType.VALUE][1])
else:
out_y = parameter[name]
else:
out_y = dict()
for key in in_x.keys():
......
......@@ -131,12 +131,41 @@ def oneshot_mode(
optional_inputs = [optional_inputs[idx] for idx in range(inputs_num)]
layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
for func_name, func in funcs.items()]
layer_out = tf.add_n(layer_outs)
output_num = len(layer_outs)
rate = 0.01 ** (1 / output_num)
noise_shape = [output_num] + [1] * len(layer_outs[0].get_shape())
layer_outs = tf.nn.dropout(layer_outs, rate=rate, noise_shape=noise_shape)
layer_out = tf.reduce_sum(layer_outs, axis=0)
return layer_out
def reload_tensorflow_variables(session, tf=None):
def darts_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''Build a DARTS-style mixed operation for one mutable layer.

    Every candidate function (op) in ``funcs`` is applied to the inputs, and the
    layer output is the softmax-weighted sum of all candidate outputs. The
    architecture logits are created here as non-trainable variables (they are
    updated separately by ``darts_training``) and collected into the module-level
    ``arch_logits_list`` so the architecture optimizer can find them.

    Parameters mirror the other ``*_mode`` helpers; ``tf`` is the imported
    tensorflow module. Returns the mixed layer output tensor.
    '''
    optional_inputs = list(optional_inputs.values())
    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
                  for func_name, func in funcs.items()]
    # Create architecture weights for every func(op). The original format string
    # had only two placeholders, silently dropping the "arch_weights" suffix.
    var_name = "{}_{}_{}".format(mutable_id, mutable_layer_id, "arch_weights")
    if 'arch_logits_list' not in globals():
        global arch_logits_list
        arch_logits_list = list()
    # Bug fix: len[funcs] subscripted the builtin and raised TypeError;
    # len(funcs) is the intended number of candidate ops.
    arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
    arch_logits_list.append(arch_logits)
    arch_weights = tf.nn.softmax(arch_logits)
    layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
    return layer_out
def reload_tensorflow_variables(tf, session):
'''In ENAS mode, this function reloads every single variable created in the `enas_mode` function so
that the whole tensorflow graph is changed into the certain subgraph received from the Tuner.
---------------
......@@ -158,3 +187,22 @@ def reload_tensorflow_variables(session, tf=None):
tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
tf_variables[name_prefix]['optional_inputs'].load(
chosen_inputs, session)
def darts_training(tf, session, loss, feed_dict):
    '''Run one update step of the DARTS architecture weights.

    Lazily creates (once, as module-level globals) a momentum optimizer and a
    train op that applies gradients of ``loss`` to the architecture logits
    collected in ``arch_logits_list`` by ``darts_mode``, then runs that op.

    tf: the imported tensorflow module
    session: the tf.Session to run the update in
    loss: scalar loss tensor used to compute architecture gradients
    feed_dict: feed dict for any placeholders ``loss`` depends on
    '''
    if 'optimizer' not in globals():
        global arch_logits_list
        global optimizer
        global train_op
        # Bug fix: tf.MomentumOptimizer does not exist — the momentum optimizer
        # lives under tf.train and requires an explicit momentum value.
        # NOTE(review): momentum=0.9 follows the DARTS paper default — confirm.
        optimizer = tf.train.MomentumOptimizer(learning_rate=0.025, momentum=0.9)
        # TODO: Calculate loss
        grads_and_vars = optimizer.compute_gradients(loss, arch_logits_list)
        train_op = optimizer.apply_gradients(grads_and_vars)
    # Bug fix: feed_dict was accepted but never used, so placeholder-based
    # losses could not be evaluated.
    session.run(train_op, feed_dict=feed_dict)
def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
    '''Perform the per-step training update required by the given NAS mode.

    darts_mode updates the architecture weights; enas_mode reloads the chosen
    subgraph variables. Other modes (e.g. classic_mode) need no update, so the
    call is a no-op for them.
    '''
    if nas_mode == 'darts_mode':
        darts_training(tf, session, loss, feed_dict)
        return
    if nas_mode == 'enas_mode':
        reload_tensorflow_variables(tf, session)
\ No newline at end of file
......@@ -57,7 +57,7 @@ def quniform(low, high, q, random_state):
q: sample step
random_state: an object of numpy.random.RandomState
'''
return np.round(uniform(low, high, random_state) / q) * q
return np.clip(np.round(uniform(low, high, random_state) / q) * q, low, high)
def loguniform(low, high, random_state):
......@@ -77,7 +77,7 @@ def qloguniform(low, high, q, random_state):
q: sample step
random_state: an object of numpy.random.RandomState
'''
return np.round(loguniform(low, high, random_state) / q) * q
return np.clip(np.round(loguniform(low, high, random_state) / q) * q, low, high)
def normal(mu, sigma, random_state):
......
......@@ -24,7 +24,7 @@ import numpy as np
from .env_vars import trial_env_vars
from . import trial
from .nas_utils import classic_mode, enas_mode, oneshot_mode
from .nas_utils import classic_mode, enas_mode, oneshot_mode, darts_mode
__all__ = [
......@@ -57,14 +57,14 @@ if trial_env_vars.NNI_PLATFORM is None:
def quniform(low, high, q, name=None):
assert high > low, 'Upper bound must be larger than lower bound'
return round(random.uniform(low, high) / q) * q
return np.clip(round(random.uniform(low, high) / q) * q, low, high)
def loguniform(low, high, name=None):
assert low > 0, 'Lower bound must be positive'
return np.exp(random.uniform(np.log(low), np.log(high)))
def qloguniform(low, high, q, name=None):
return round(loguniform(low, high) / q) * q
return np.clip(round(loguniform(low, high) / q) * q, low, high)
def normal(mu, sigma, name=None):
return random.gauss(mu, sigma)
......@@ -158,8 +158,8 @@ else:
fixed_inputs,
optional_inputs,
optional_input_size)
elif mode == 'enas_mode':
assert tf is not None, 'Internal Error: Tensorflow should not be None in enas_mode'
assert tf is not None, 'Internal Error: Tensorflow should not be None in modes other than classic_mode'
if mode == 'enas_mode':
return enas_mode(mutable_id,
mutable_layer_id,
funcs,
......@@ -168,8 +168,7 @@ else:
optional_inputs,
optional_input_size,
tf)
elif mode == 'oneshot_mode':
assert tf is not None, 'Internal Error: Tensorflow should not be None in oneshot_mode'
if mode == 'oneshot_mode':
return oneshot_mode(mutable_id,
mutable_layer_id,
funcs,
......@@ -178,8 +177,16 @@ else:
optional_inputs,
optional_input_size,
tf)
else:
raise RuntimeError('Unrecognized mode: %s' % mode)
if mode == 'darts_mode':
return darts_mode(mutable_id,
mutable_layer_id,
funcs,
funcs_args,
fixed_inputs,
optional_inputs,
optional_input_size,
tf)
raise RuntimeError('Unrecognized mode: %s' % mode)
def _get_param(key):
if trial._params is None:
......
......@@ -37,8 +37,8 @@ class DefaultPoint extends React.Component<DefaultPointProps, DefaultPointState>
defaultMetric = (succeedSource: Array<TableObj>, isCurve: boolean) => {
const { optimize } = this.props;
const accSource: Array<DetailAccurPoint> = [];
const showSource: Array<TableObj> = succeedSource.filter(filterByStatus);
const lengthOfSource = showSource.length;
const drawSource: Array<TableObj> = succeedSource.filter(filterByStatus);
const lengthOfSource = drawSource.length;
const tooltipDefault = lengthOfSource === 0 ? 'No data' : '';
if (this._isDefaultMounted === true) {
this.setState(() => ({
......@@ -67,13 +67,14 @@ class DefaultPoint extends React.Component<DefaultPointProps, DefaultPointState>
}
} else {
const resultList: Array<number | object>[] = [];
const lineListDefault: Array<number> = [];
Object.keys(showSource).map(item => {
const temp = showSource[item];
// lineListDefault: [[sequenceId, default metric], []]
const lineListDefault: Array<number>[] = [];
Object.keys(drawSource).map(item => {
const temp = drawSource[item];
if (temp.acc !== undefined) {
if (temp.acc.default !== undefined) {
const searchSpace = temp.description.parameters;
lineListDefault.push(temp.acc.default);
lineListDefault.push([temp.sequenceId, temp.acc.default]);
accSource.push({
acc: temp.acc.default,
index: temp.sequenceId,
......@@ -84,25 +85,25 @@ class DefaultPoint extends React.Component<DefaultPointProps, DefaultPointState>
});
// deal with best metric line
const bestCurve: Array<number | object>[] = []; // best curve data source
bestCurve.push([0, lineListDefault[0], accSource[0].searchSpace]); // push the first value
bestCurve.push([lineListDefault[0][0], lineListDefault[0][1], accSource[0].searchSpace]);
if (optimize === 'maximize') {
for (let i = 1; i < lineListDefault.length; i++) {
const val = lineListDefault[i];
const val = lineListDefault[i][1];
const latest = bestCurve[bestCurve.length - 1][1];
if (val >= latest) {
bestCurve.push([i, val, accSource[i].searchSpace]);
bestCurve.push([lineListDefault[i][0], val, accSource[i].searchSpace]);
} else {
bestCurve.push([i, latest, accSource[i].searchSpace]);
bestCurve.push([lineListDefault[i][0], latest, accSource[i].searchSpace]);
}
}
} else {
for (let i = 1; i < lineListDefault.length; i++) {
const val = lineListDefault[i];
const val = lineListDefault[i][1];
const latest = bestCurve[bestCurve.length - 1][1];
if (val <= latest) {
bestCurve.push([i, val, accSource[i].searchSpace]);
bestCurve.push([lineListDefault[i][0], val, accSource[i].searchSpace]);
} else {
bestCurve.push([i, latest, accSource[i].searchSpace]);
bestCurve.push([lineListDefault[i][0], latest, accSource[i].searchSpace]);
}
}
}
......@@ -165,15 +166,17 @@ class DefaultPoint extends React.Component<DefaultPointProps, DefaultPointState>
type: 'value',
scale: true
},
series: [{
symbolSize: 6,
type: 'scatter',
data: resultList
}, {
type: 'line',
lineStyle: { color: '#FF6600' },
data: realDefault
}]
series: [
{
type: 'line',
lineStyle: { color: '#FF6600' },
data: realDefault
},
{
symbolSize: 6,
type: 'scatter',
data: resultList
}]
};
}
......
......@@ -9,9 +9,10 @@ tuner:
classFileName: random_nas_tuner.py
className: RandomNASTuner
trial:
codeDir: ../../../examples/trials/mnist-nas
codeDir: ../../../examples/trials/mnist-nas/classic_mode/
command: python3 mnist.py --batch_num 100
gpuNum: 0
nasMode: classic_mode
useAnnotation: true
multiPhase: false
......
......@@ -17,4 +17,4 @@
* 使用了私有 API 来检测是否 Tuner 和 Assessor 成功结束。
* RESTful 服务的输出未测试。
* 远程计算机训练服务没有测试。
\ No newline at end of file
* 远程计算机训练平台没有被测试。
\ No newline at end of file
......@@ -50,6 +50,7 @@ jobs:
displayName: 'Install dependencies for integration tests in PAI mode'
- script: |
set -e
if [ $(build_docker_img) = 'true' ]
then
cd deployment/pypi
......
......@@ -48,6 +48,7 @@ jobs:
cat test/port
displayName: 'Get docker port'
- script: |
set -e
cd test
python3 generate_ts_config.py --ts remote --remote_user $(docker_user) --remote_host $(remote_host) \
--remote_port $(cat port) --remote_pwd $(docker_pwd) --nni_manager_ip $(nni_manager_ip)
......
# list of commands/arguments
__nnictl_cmds="create resume update stop trial experiment config webui log"
__nnictl_create_cmds="--config --port"
__nnictl_resume_cmds="--port"
__nnictl_cmds="create resume update stop trial experiment platform import export webui config log package tensorboard top"
__nnictl_create_cmds="--config --port --debug"
__nnictl_resume_cmds="--port --debug"
__nnictl_update_cmds="searchspace concurrency duration trialnum"
__nnictl_update_searchspace_cmds="--filename"
__nnictl_update_concurrency_cmds="--value"
__nnictl_update_duration_cmds="--value"
__nnictl_update_trialnum_cmds="--value"
__nnictl_trial_cmds="ls kill"
__nnictl_trial_kill_cmds="--trialid"
__nnictl_stop_cmds="--port all"
__nnictl_trial_cmds="ls kill codegen"
__nnictl_trial_kill_cmds="--trial_id"
__nnictl_trial_codegen_cmds="--trial_id"
__nnictl_experiment_cmds="show status list delete"
__nnictl_experiment_list_cmds="--all"
__nnictl_experiment_delete_cmds="--all"
__nnictl_platform_cmds="clean"
__nnictl_platform_clean_cmds="--config"
__nnictl_import_cmds="--filename"
__nnictl_export_cmds="--type --filename"
__nnictl_webui_cmds="url"
__nnictl_experiment_cmds="show list status"
__nnictl_experiment_list_cmds="all"
__nnictl_config_cmds="show"
__nnictl_log_cmds="stdout stderr trial"
__nnictl_log_stdout_cmds="--tail --head --path"
__nnictl_log_stderr_cmds="--tail --head --path"
__nnictl_log_trial_cmds="--trial_id"
__nnictl_package_cmds="install show"
__nnictl_package_install_cmds="--name"
__nnictl_tensorboard_cmds="start stop"
__nnictl_tensorboard_start_cmds="--trial_id --port"
__nnictl_top_cmds="--time"
# list of arguments that accept a file name
__nnictl_file_args=" --config -c --filename -f "
# list of arguments that accept an experiment ID
__nnictl_experiment_args=" --experiment -e "
# list of arguments that accept a trial ID
__nnictl_trial_args=" --trialid -t "
# list of commands that accept an experiment ID as second argument
__nnictl_2st_expid_cmds=" resume stop import export "
# list of commands that accept an experiment ID as third argument
__nnictl_3rd_expid_cmds=" update trial experiment webui config log tensorboard "
# remove already set arguments from candidates
......@@ -33,15 +42,27 @@ __nnictl_remain_args()
local ret=${!1} # ret = $__nnictl_xxx_cmds
# for arg in COMP_WORDS[:-1]:
for arg in "${COMP_WORDS[@]::${#COMP_WORDS[@]}-1}"; do
if [[ $arg == --* ]]; then
local ret=${ret/$arg/} # remove it from $ret
fi
local ret=${ret/$arg/} # remove it from $ret
done
echo $ret
}
# complete files with specific extension
__nnictl_complete_extension()
{
    # $1 is an extension pattern for compgen's -X exclusion filter, e.g.
    # "json" or the extglob alternation "@(yml|yaml)".
    COMPREPLY=($(compgen -f -X "!*.$1" -- ${COMP_WORDS[-1]}))
    if [[ -z "${COMPREPLY[*]}" ]]; then
        # if there is no matching file here, search in sub-directories
        # (offer directory names with a trailing "/" and keep the cursor
        # attached so the user can descend without a space being appended)
        COMPREPLY=($(compgen -d -S "/" -- ${COMP_WORDS[-1]}))
        compopt -o nospace
    fi
}
_nnictl()
{
local cur=${COMP_WORDS[-1]}
local last=${COMP_WORDS[-2]}
if [[ ${#COMP_WORDS[@]} -eq 2 ]]; then
# completing first argument from __nnictl_cmds
COMPREPLY=($(compgen -W "$__nnictl_cmds" -- "${COMP_WORDS[1]}"))
......@@ -51,33 +72,70 @@ _nnictl()
local args=__nnictl_${COMP_WORDS[1]}_cmds
COMPREPLY=($(compgen -W "${!args}" -- "${COMP_WORDS[2]}"))
elif [[ ${COMP_WORDS[-2]} != -* ]]; then
# add experiment IDs to candidates if desired
if [[ " resume stop import export " =~ " ${COMP_WORDS[1]} " ]]; then
local experiments=$(ls ~/nni/experiments 2>/dev/null)
COMPREPLY+=($(compgen -W "$experiments" -- $cur))
fi
elif [[ $last != -* || $last == --debug ]]; then
# last argument does not start with "-", so this one is likely to be "--xxx"
if [[ ${COMP_WORDS[2]} == -* ]]; then
# second argument starts with "-", use __nnictl_${FirstArg}_cmds
local args=__nnictl_${COMP_WORDS[1]}_${COMP_WORDS[2]}_cmds
if [[ $args =~ "-" || -z ${!args} ]]; then
# the second argument starts with "-", use __nnictl_${FirstArg}_cmds
local args=__nnictl_${COMP_WORDS[1]}_cmds
else
# second argument is a word, use __nnictl_${FirstArg}_{SecondArg}_cmds
local args=__nnictl_${COMP_WORDS[1]}_${COMP_WORDS[2]}_cmds
fi
# remove already set arguments from candidates
local remain_args=$(__nnictl_remain_args ${args})
COMPREPLY=($(compgen -W "$remain_args" -- "${COMP_WORDS[-1]}"))
COMPREPLY=($(compgen -W "$remain_args" -- $cur))
# if this is 3rd arguments, try adding experiment IDs to candidates
if [[ ${#COMP_WORDS[@]} -eq 4 ]]; then
if [[ $__nnictl_3rd_expid_cmds =~ " ${COMP_WORDS[1]} " && ${COMP_WORDS[2]} != "list" ]]; then
local experiments=$(ls ~/nni/experiments 2>/dev/null)
COMPREPLY+=($(compgen -W "$experiments" -- $cur))
fi
fi
elif [[ ${COMP_WORDS[1]} == "export" ]]; then
# "export" command is somewhat unique
if [[ " --type -t " =~ " $last " ]]; then
COMPREPLY=($(compgen -W "json csv" -- $cur))
elif [[ " --filename -f " =~ " $last " ]]; then
# try to detect whether complete CSV file or JSON file
[[ "$COMP_LINE" =~ "csv" ]] && local export_csv=1
[[ "$COMP_LINE" =~ "json" ]] && local export_json=1
if [[ -n $export_csv && -z $export_json ]]; then
local ext="csv" # CSV only
elif [[ -n $export_json && -z $export_csv ]]; then
local ext="json" # JSON only
else
local ext="@(csv|json)" # both
fi
__nnictl_complete_extension "$ext"
fi
elif [[ " --trial_id -t " =~ " $last " ]]; then
# complete trial ID
if [[ -e ${HOME}/nni/experiments/${COMP_WORDS[2]} ]]; then
local trials=$(ls -d ~/nni/experiments/${COMP_WORDS[2]}/trials/* 2>/dev/null | grep -o '[^/]*$')
elif [[ -e "${HOME}/nni/experiments/${COMP_WORDS[3]}" ]]; then
local trials=$(ls -d ~/nni/experiments/${COMP_WORDS[3]}/trials/* 2>/dev/null | grep -o '[^/]*$')
else
local trials=$(ls -d ~/nni/experiments/*/trials/* 2>/dev/null | grep -o '[^/]*$')
fi
COMPREPLY=($(compgen -W "$trials" -- $cur))
elif [[ $__nnictl_file_args =~ " ${COMP_WORDS[-2]} " ]]; then
# complete file names
COMPREPLY=($(compgen -f "${COMP_WORDS[-1]}"))
elif [[ " --config -c " =~ " $last " ]]; then
# complete YAML file
__nnictl_complete_extension "@(yml|yaml)"
elif [[ $__nnictl_experiment_args =~ " ${COMP_WORDS[-2]} " ]]; then
# complete experiment IDs
local experiments=$(ls ~/nni/experiments 2>/dev/null)
COMPREPLY=($(compgen -W "$experiments" -- "${COMP_WORDS[-1]}"))
elif [[ " --filename -f " =~ " $last " ]]; then
# complete JSON file
__nnictl_complete_extension "json"
elif [[ $__nnictl_trial_args =~ " ${COMP_WORDS[-2]} " ]]; then
# complete trial IDs
local trials=$(ls -d ~/nni/experiments/*/trials/* 2>/dev/null | grep -o '[^/]*$')
COMPREPLY=($(compgen -W "$trials" -- "${COMP_WORDS[-1]}"))
fi
}
complete -o nospace -F _nnictl nnictl
complete -o nosort -F _nnictl nnictl
......@@ -28,7 +28,7 @@ In NNI, there are mainly four types of annotation:
**Arguments**
- **sampling_algo**: Sampling algorithm that specifies a search space. User should replace it with a built-in NNI sampling function whose name consists of an `nni.` identification and a search space type specified in [SearchSpaceSpec](https://nni.readthedocs.io/en/latest/SearchSpaceSpec.html) such as `choice` or `uniform`.
- **sampling_algo**: Sampling algorithm that specifies a search space. User should replace it with a built-in NNI sampling function whose name consists of an `nni.` identification and a search space type specified in [SearchSpaceSpec](https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html) such as `choice` or `uniform`.
- **name**: The name of the variable that the selected value will be assigned to. Note that this argument should be the same as the left value of the following assignment statement.
There are 10 types to express your search space as follows:
......@@ -83,10 +83,10 @@ h_pooling = max_pool(hidden_layer, pool_size)
`'''@nni.report_intermediate_result(metrics)'''`
`@nni.report_intermediate_result` is used to report intermediate result, whose usage is the same as `nni.report_intermediate_result` in [Trials.md](https://nni.readthedocs.io/en/latest/Trials.html)
`@nni.report_intermediate_result` is used to report intermediate result, whose usage is the same as `nni.report_intermediate_result` in the doc of [Write a trial run on NNI](https://nni.readthedocs.io/en/latest/TrialExample/Trials.html)
### 4. Annotate final result
`'''@nni.report_final_result(metrics)'''`
`@nni.report_final_result` is used to report the final result of the current trial, whose usage is the same as `nni.report_final_result` in [Trials.md](https://nni.readthedocs.io/en/latest/Trials.html)
`@nni.report_final_result` is used to report the final result of the current trial, whose usage is the same as `nni.report_final_result` in the doc of [Write a trial run on NNI](https://nni.readthedocs.io/en/latest/TrialExample/Trials.html)
......@@ -27,7 +27,7 @@ NNI 中,有 4 种类型的 Annotation;
**参数**
- **sampling_algo**: 指定搜索空间的采样算法。 可将其换成 NNI 支持的其它采样函数,函数要以 `nni.` 开头。例如,`choice``uniform`,详见 [SearchSpaceSpec](https://nni.readthedocs.io/zh/latest/SearchSpaceSpec.html)
- **sampling_algo**: 指定搜索空间的采样算法。 可将其换成 NNI 支持的其它采样函数,函数要以 `nni.` 开头。例如,`choice``uniform`,详见 [SearchSpaceSpec](https://nni.readthedocs.io/zh/latest/Tutorial/SearchSpaceSpec.html)
- **name**: 将被赋值的变量名称。 注意,此参数应该与下面一行等号左边的值相同。
NNI 支持如下 10 种类型来表示搜索空间:
......@@ -72,10 +72,10 @@ h_pooling = max_pool(hidden_layer, pool_size)
`'''@nni.report_intermediate_result(metrics)'''`
`@nni.report_intermediate_result` 用来返回中间结果,这和 [Trials.md](https://nni.readthedocs.io/zh/latest/Trials.html) `nni.report_intermediate_result` 用法一样。
`@nni.report_intermediate_result` 用来返回中间结果,这和[在 NNI 上实现 Trial](https://nni.readthedocs.io/zh/latest/TrialExample/Trials.html)`nni.report_intermediate_result` 用法一样。
### 4. 最终结果
`'''@nni.report_final_result(metrics)'''`
`@nni.report_final_result` 用来返回当前 Trial 的最终结果,这和 [Trials.md](https://nni.readthedocs.io/zh/latest/Trials.html) 中的 `nni.report_final_result` 用法一样。
\ No newline at end of file
`@nni.report_final_result` 用来返回当前 Trial 的最终结果,这和[在 NNI 上实现 Trial](https://nni.readthedocs.io/zh/latest/TrialExample/Trials.html) 中的 `nni.report_final_result` 用法一样。
\ No newline at end of file
......@@ -111,7 +111,7 @@ def parse_annotation_mutable_layers(code, lineno, nas_mode):
target_call_args.append(ast.Dict(keys=[], values=[]))
target_call_args.append(ast.Num(n=0))
target_call_args.append(ast.Str(s=nas_mode))
if nas_mode in ['enas_mode', 'oneshot_mode']:
if nas_mode in ['enas_mode', 'oneshot_mode', 'darts_mode']:
target_call_args.append(ast.Name(id='tensorflow'))
target_call = ast.Call(func=target_call_attr, args=target_call_args, keywords=[])
node = ast.Assign(targets=[layer_output], value=target_call)
......@@ -319,12 +319,11 @@ class Transformer(ast.NodeTransformer):
else:
return node # not an annotation, ignore it
if string.startswith('@nni.get_next_parameter'):
call_node = parse_annotation(string[1:]).value
if call_node.args:
# it is used in enas mode as it needs to retrieve the next subgraph for training
call_attr = ast.Attribute(value=ast.Name(id='nni', ctx=ast.Load()), attr='reload_tensorflow_variables', ctx=ast.Load())
return ast.Expr(value=ast.Call(func=call_attr, args=call_node.args, keywords=[]))
if string.startswith('@nni.training_update'):
expr = parse_annotation(string[1:])
call_node = expr.value
call_node.args.insert(0, ast.Str(s=self.nas_mode))
return expr
if string.startswith('@nni.report_intermediate_result') \
or string.startswith('@nni.report_final_result') \
......@@ -378,8 +377,8 @@ def parse(code, nas_mode=None):
if type(nodes[i]) is ast.ImportFrom and nodes[i].module == '__future__':
last_future_import = i
nodes.insert(last_future_import + 1, import_nni)
# enas and oneshot modes for tensorflow need tensorflow module, so we import it here
if nas_mode in ['enas_mode', 'oneshot_mode']:
# enas, oneshot and darts modes for tensorflow need tensorflow module, so we import it here
if nas_mode in ['enas_mode', 'oneshot_mode', 'darts_mode']:
import_tf = ast.Import(names=[ast.alias(name='tensorflow', asname=None)])
nodes.insert(last_future_import + 1, import_tf)
......
......@@ -221,7 +221,7 @@ common_trial_schema = {
'command': setType('command', str),
'codeDir': setPathCheck('codeDir'),
Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
Optional('nasMode'): setChoice('classic_mode', 'enas_mode', 'oneshot_mode')
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
}
}
......@@ -241,7 +241,7 @@ pai_trial_schema = {
Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
Optional('virtualCluster'): setType('virtualCluster', str),
Optional('nasMode'): setChoice('classic_mode', 'enas_mode', 'oneshot_mode')
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode')
}
}
......@@ -256,7 +256,7 @@ pai_config_schema = {
kubeflow_trial_schema = {
'trial':{
'codeDir': setPathCheck('codeDir'),
Optional('nasMode'): setChoice('classic_mode', 'enas_mode', 'oneshot_mode'),
Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode', 'darts_mode'),
Optional('ps'): {
'replicas': setType('replicas', int),
'command': setType('command', str),
......
......@@ -202,9 +202,9 @@ def parse_args():
parser_tensorboard_start.add_argument('--trial_id', '-T', dest='trial_id', help='the id of trial')
parser_tensorboard_start.add_argument('--port', dest='port', default=6006, help='the port to start tensorboard')
parser_tensorboard_start.set_defaults(func=start_tensorboard)
parser_tensorboard_start = parser_tensorboard_subparsers.add_parser('stop', help='stop tensorboard')
parser_tensorboard_start.add_argument('id', nargs='?', help='the id of experiment')
parser_tensorboard_start.set_defaults(func=stop_tensorboard)
parser_tensorboard_stop = parser_tensorboard_subparsers.add_parser('stop', help='stop tensorboard')
parser_tensorboard_stop.add_argument('id', nargs='?', help='the id of experiment')
parser_tensorboard_stop.set_defaults(func=stop_tensorboard)
#parse top command
parser_top = subparsers.add_parser('top', help='monitor the experiment')
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment