Unverified Commit 3d221da9 authored by fishyds's avatar fishyds Committed by GitHub
Browse files

Merge latest code changes into Github Master (#54)

* Merge latest code changes into Github Master

* temporary modification for travis

* temporary modification for travis
parent c015421c
...@@ -7,7 +7,7 @@ _logger.info('start') ...@@ -7,7 +7,7 @@ _logger.info('start')
_result = open('/tmp/nni_assessor_result.txt', 'w') _result = open('/tmp/nni_assessor_result.txt', 'w')
class NaiveAssessor(Assessor): class NaiveAssessor(Assessor):
def __init__(self): def __init__(self, optimize_mode):
self._killed = set() self._killed = set()
_logger.info('init') _logger.info('init')
......
...@@ -8,7 +8,7 @@ _logger.info('start') ...@@ -8,7 +8,7 @@ _logger.info('start')
_result = open('/tmp/nni_tuner_result.txt', 'w') _result = open('/tmp/nni_tuner_result.txt', 'w')
class NaiveTuner(Tuner): class NaiveTuner(Tuner):
def __init__(self): def __init__(self, optimize_mode):
self.cur = 0 self.cur = 0
_logger.info('init') _logger.info('init')
......
# Completion word lists for nnictl.
# Naming convention: __nnictl_<cmd>[_<subcmd>]_cmds holds the candidate
# words offered after that (sub)command; _nnictl below resolves the right
# variable by name via indirect expansion (${!args}).

# Top-level sub-commands.
__nnictl_cmds="create resume update stop trial webui experiment config rest log"

# Options for each sub-command.
__nnictl_create_cmds="--config --webuiport"
__nnictl_resume_cmds="--experiment --manager --webuiport"

# "update" takes a second-level sub-command, each with its own options.
__nnictl_update_cmds="searchspace concurrency duration"
__nnictl_update_searchspace_cmds="--filename"
__nnictl_update_concurrency_cmds="--value"
__nnictl_update_duration_cmds="--value"

__nnictl_trial_cmds="ls kill"
__nnictl_trial_kill_cmds="--trialid"

__nnictl_webui_cmds="start stop url"
__nnictl_webui_start_cmds="--port"

__nnictl_experiment_cmds="show"
__nnictl_config_cmds="show"
__nnictl_rest_cmds="check"

__nnictl_log_cmds="stdout stderr"
__nnictl_log_stdout_cmds="--tail --head --path"
__nnictl_log_stderr_cmds="--tail --head --path"
# Echo the candidate words named by $1 (a __nnictl_*_cmds variable name),
# minus any "--option" words already present on the command line, so an
# option is not offered twice.
__nnictl_remain_args()
{
    ret=${!1}  # indirect expansion: ret = $__nnictl_xxx_cmds
    # for arg in COMP_WORDS[:-1]: (skip the word currently being completed)
    for arg in "${COMP_WORDS[@]::${#COMP_WORDS[@]}-1}"; do
        # remove previously set argument from ret
        if [[ $arg == --* ]]; then
            ret=${ret/$arg/}  # drops the first occurrence only
        fi
    done
    echo $ret  # intentionally unquoted: collapses the leftover whitespace
}
# Programmable-completion entry point for nnictl; registered with
# `complete -F _nnictl nnictl` below.  Completes the first argument from
# __nnictl_cmds, the second from __nnictl_<cmd>_cmds, and later words from
# the matching option list with already-typed options removed.
_nnictl()
{
    _words_cnt=${#COMP_WORDS[@]}
    if [ $_words_cnt == 1 ]; then
        # No argument input: offer all top-level commands.
        # Fixed: the original ran `complete -W "$_nnictl_cmds"` here, which
        # both referenced an undefined variable (single leading underscore)
        # and misused `complete` inside a completion function — candidates
        # must be returned through COMPREPLY.
        COMPREPLY=($(compgen -W "$__nnictl_cmds"))
    elif [ $_words_cnt == 2 ]; then
        # completing first argument from __nnictl_cmds
        COMPREPLY=($(compgen -W "$__nnictl_cmds" -- "${COMP_WORDS[1]}"))
    elif [ $_words_cnt == 3 ]; then
        # completing second argument from __nnictl_${FirstArg}_cmds
        args=__nnictl_${COMP_WORDS[1]}_cmds
        COMPREPLY=($(compgen -W "${!args}" -- "${COMP_WORDS[2]}"))
    elif [[ ${COMP_WORDS[-2]} != -* ]]; then
        # previous word does not start with "-", so the word being completed
        # is likely an option ("--xxx")
        if [[ ${COMP_WORDS[2]} == -* ]]; then
            # second argument starts with "-", use __nnictl_${FirstArg}_cmds
            args=__nnictl_${COMP_WORDS[1]}_cmds
        else
            # second argument is a word, use __nnictl_${FirstArg}_${SecondArg}_cmds
            args=__nnictl_${COMP_WORDS[1]}_${COMP_WORDS[2]}_cmds
        fi
        # remove already-typed options from the candidate list
        remain_args=$(__nnictl_remain_args ${args})
        COMPREPLY=($(compgen -W "$remain_args" -- "${COMP_WORDS[-1]}"))
    fi
}
complete -F _nnictl nnictl
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from schema import Schema, And, Use, Optional, Regex, Or
# Declarative validation schema for the experiment YAML configuration.
# Each key maps to a type, a predicate, or a nested schema; `Or` allows
# alternative shapes (e.g. builtin vs. user-provided tuner/assessor).
CONFIG_SCHEMA = Schema({
    'authorName': str,
    'experimentName': str,
    'trialConcurrency': And(int, lambda n: 1 <= n <= 999999),
    # Duration such as "30s", "2h".  Fixed: the original character class
    # [s|m|h|d] also accepted a literal '|' as a time unit; inside [] the
    # '|' is not an alternation operator.
    'maxExecDuration': Regex(r'^[1-9][0-9]*[smhd]$'),
    'maxTrialNum': And(int, lambda x: 1 <= x <= 99999),
    'trainingServicePlatform': And(str, lambda x: x in ['remote', 'local', 'pai']),
    Optional('searchSpacePath'): os.path.exists,
    'useAnnotation': bool,
    # Tuner: either a builtin tuner referenced by name, or a user class
    # identified by codeDir/classFileName/className.
    'tuner': Or({
        'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'Evolution'),
        'classArgs': {
            'optimize_mode': Or('maximize', 'minimize'),
            Optional('speed'): int
        },
        Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
    }, {
        'codeDir': os.path.exists,
        'classFileName': str,
        'className': str,
        'classArgs': {
            'optimize_mode': Or('maximize', 'minimize'),
            Optional('speed'): int
        },
        Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
    }),
    'trial': {
        'command': str,
        'codeDir': os.path.exists,
        'gpuNum': And(int, lambda x: 0 <= x <= 99999)
    },
    # Assessor is optional and mirrors the tuner's builtin/custom split.
    Optional('assessor'): Or({
        'builtinAssessorName': lambda x: x in ['Medianstop'],
        'classArgs': {
            'optimize_mode': lambda x: x in ['maximize', 'minimize']},
        'gpuNum': And(int, lambda x: 0 <= x <= 99999)
    }, {
        'codeDir': os.path.exists,
        'classFileName': str,
        'className': str,
        'classArgs': {
            'optimize_mode': lambda x: x in ['maximize', 'minimize']},
        'gpuNum': And(int, lambda x: 0 <= x <= 99999),
    }),
    # Remote machines authenticate with either a password or an SSH key.
    # Fixed: port bound was `0 < x < 65535`, which rejected the valid TCP
    # port 65535.
    Optional('machineList'): [Or({
        'ip': str,
        'port': And(int, lambda x: 0 < x < 65536),
        'username': str,
        'passwd': str
    }, {
        'ip': str,
        'port': And(int, lambda x: 0 < x < 65536),
        'username': str,
        'sshKeyPath': os.path.exists,
        Optional('passphrase'): str
    })],
    Optional('pai'): {
        'jobName': str,
        'image': str,
        'authFile': os.path.exists,
        'dataDir': os.path.exists,
        'outputDir': os.path.exists,
        'codeDir': os.path.exists,
        'virtualCluster': str,
        'taskRoles': [
            {
                'name': str,
                'taskNumber': And(int, lambda x: 0 <= x <= 99999),
                'cpuNumber': And(int, lambda x: 0 <= x <= 99999),
                'memoryMB': And(int, lambda x: 0 <= x <= 99999),
                'shmMB': And(int, lambda x: 0 <= x <= 99999),
                'gpuNumber': And(int, lambda x: 0 <= x <= 99999),
                'portList': [
                    {
                        'label': str,
                        'beginAt': str,
                        'portNumber': And(int, lambda x: 0 < x < 65536)
                    }
                ],
                'command': str,
                'minFailedTaskCount': And(int, lambda x: 0 <= x <= 99999),
                'minSucceededTaskCount': And(int, lambda x: 0 <= x <= 99999)
            }
        ],
        'gpuType': str,
        'retryCount': And(int, lambda x: 0 <= x <= 99999)
    }
})
\ No newline at end of file
...@@ -22,9 +22,11 @@ ...@@ -22,9 +22,11 @@
import json import json
import os import os
import shutil import shutil
import string
from subprocess import Popen, PIPE, call from subprocess import Popen, PIPE, call
import tempfile import tempfile
from nni_annotation import * from nni_annotation import *
import random
from .launcher_utils import validate_all_content from .launcher_utils import validate_all_content
from .rest_utils import rest_put, rest_post, check_rest_server, check_rest_server_quick from .rest_utils import rest_put, rest_post, check_rest_server, check_rest_server_quick
from .url_utils import cluster_metadata_url, experiment_url from .url_utils import cluster_metadata_url, experiment_url
...@@ -125,7 +127,7 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No ...@@ -125,7 +127,7 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
nni_config.set_config('restServerPid', rest_process.pid) nni_config.set_config('restServerPid', rest_process.pid)
# Deal with annotation # Deal with annotation
if experiment_config.get('useAnnotation'): if experiment_config.get('useAnnotation'):
path = os.path.join(tempfile.gettempdir(), 'nni', 'annotation') path = os.path.join(tempfile.gettempdir(), 'nni', 'annotation', ''.join(random.sample(string.ascii_letters + string.digits, 8)))
if os.path.isdir(path): if os.path.isdir(path):
shutil.rmtree(path) shutil.rmtree(path)
os.makedirs(path) os.makedirs(path)
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
import os import os
import json import json
from .config_schema import CONFIG_SCHEMA
def expand_path(experiment_config, key): def expand_path(experiment_config, key):
'''Change '~' to user home directory''' '''Change '~' to user home directory'''
...@@ -31,45 +32,16 @@ def parse_relative_path(root_path, experiment_config, key): ...@@ -31,45 +32,16 @@ def parse_relative_path(root_path, experiment_config, key):
if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)): if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
experiment_config[key] = os.path.join(root_path, experiment_config.get(key)) experiment_config[key] = os.path.join(root_path, experiment_config.get(key))
def check_empty(experiment_config, key): def parse_time(experiment_config):
'''Check whether a key is in experiment_config and has non-empty value'''
if key not in experiment_config or experiment_config[key] is None:
raise ValueError('%s can not be empty' % key)
def check_digit(experiment_config, key, start, end):
    '''Check whether experiment_config[key] is a digit within [start, end].

    Raises:
        ValueError: if the value is not all digits (as a string) or falls
            outside the inclusive range.
    '''
    value = experiment_config[key]
    # Short-circuit keeps the original evaluation order: the range is only
    # compared when the string form is purely digits.
    in_range = str(value).isdigit() and start <= value <= end
    if not in_range:
        raise ValueError('%s must be a digit from %s to %s' % (key, start, end))
def check_directory(experiment_config, key):
    '''Check whether experiment_config[key] names an existing directory.

    Raises:
        NotADirectoryError: if the path is missing or not a directory.
    '''
    path = experiment_config[key]
    if os.path.isdir(path):
        return
    raise NotADirectoryError('%s is not a valid directory' % key)
def check_file(experiment_config, key):
    '''Check whether experiment_config[key] is an existing filesystem path.

    Raises:
        FileNotFoundError: if the path does not exist.
    '''
    path = experiment_config[key]
    if os.path.exists(path):
        return
    raise FileNotFoundError('%s is not a valid file path' % key)
def check_choice(experiment_config, key, choice_list):
    '''Check whether experiment_config[key] is one of choice_list.

    Raises:
        ValueError: if the value is not in the choice list.  The message
            lists the allowed choices.
    '''
    # Idiomatic `not in` instead of `not x in`; error message fixed from
    # the ungrammatical "must in".
    if experiment_config[key] not in choice_list:
        raise ValueError('%s must be in [%s]' % (key, ','.join(choice_list)))
def parse_time(experiment_config, key):
'''Parse time format''' '''Parse time format'''
unit = experiment_config[key][-1] unit = experiment_config['maxExecDuration'][-1]
if unit not in ['s', 'm', 'h', 'd']: if unit not in ['s', 'm', 'h', 'd']:
raise ValueError('the unit of time could only from {s, m, h, d}') raise ValueError('the unit of time could only from {s, m, h, d}')
time = experiment_config[key][:-1] time = experiment_config['maxExecDuration'][:-1]
if not time.isdigit(): if not time.isdigit():
raise ValueError('time format error!') raise ValueError('time format error!')
parse_dict = {'s':1, 'm':60, 'h':3600, 'd':86400} parse_dict = {'s':1, 'm':60, 'h':3600, 'd':86400}
experiment_config[key] = int(time) * parse_dict[unit] experiment_config['maxExecDuration'] = int(time) * parse_dict[unit]
def parse_path(experiment_config, config_path): def parse_path(experiment_config, config_path):
'''Parse path in config file''' '''Parse path in config file'''
...@@ -108,30 +80,12 @@ def validate_search_space_content(experiment_config): ...@@ -108,30 +80,12 @@ def validate_search_space_content(experiment_config):
def validate_common_content(experiment_config): def validate_common_content(experiment_config):
'''Validate whether the common values in experiment_config is valid''' '''Validate whether the common values in experiment_config is valid'''
#validate authorName try:
check_empty(experiment_config, 'authorName') CONFIG_SCHEMA.validate(experiment_config)
except Exception as exception:
#validate experimentName raise Exception(exception)
check_empty(experiment_config, 'experimentName')
#validate trialNoncurrency
check_empty(experiment_config, 'trialConcurrency')
check_digit(experiment_config, 'trialConcurrency', 1, 1000)
#validate execDuration
check_empty(experiment_config, 'maxExecDuration')
parse_time(experiment_config, 'maxExecDuration')
#validate maxTrialNum
check_empty(experiment_config, 'maxTrialNum')
check_digit(experiment_config, 'maxTrialNum', 1, 1000)
#validate trainingService
check_empty(experiment_config, 'trainingServicePlatform')
check_choice(experiment_config, 'trainingServicePlatform', ['local', 'remote'])
def validate_tuner_content(experiment_config): def parse_tuner_content(experiment_config):
'''Validate whether tuner in experiment_config is valid''' '''Validate whether tuner in experiment_config is valid'''
tuner_class_name_dict = {'TPE': 'HyperoptTuner',\ tuner_class_name_dict = {'TPE': 'HyperoptTuner',\
'Random': 'HyperoptTuner',\ 'Random': 'HyperoptTuner',\
...@@ -141,116 +95,51 @@ def validate_tuner_content(experiment_config): ...@@ -141,116 +95,51 @@ def validate_tuner_content(experiment_config):
tuner_algorithm_name_dict = {'TPE': 'tpe',\ tuner_algorithm_name_dict = {'TPE': 'tpe',\
'Random': 'random_search',\ 'Random': 'random_search',\
'Anneal': 'anneal'} 'Anneal': 'anneal'}
if experiment_config.get('tuner') is None:
raise ValueError('Please set tuner!')
if (experiment_config['tuner'].get('builtinTunerName') and \
(experiment_config['tuner'].get('codeDir') or experiment_config['tuner'].get('classFileName') or experiment_config['tuner'].get('className'))) or \
(experiment_config['tuner'].get('codeDir') and experiment_config['tuner'].get('classFileName') and experiment_config['tuner'].get('className') and \
experiment_config['tuner'].get('builtinTunerName')):
raise ValueError('Please check tuner content!')
if experiment_config['tuner'].get('builtinTunerName') and experiment_config['tuner'].get('classArgs'): if experiment_config['tuner'].get('builtinTunerName') and experiment_config['tuner'].get('classArgs'):
if tuner_class_name_dict.get(experiment_config['tuner']['builtinTunerName']) is None:
raise ValueError('Please set correct builtinTunerName!')
experiment_config['tuner']['className'] = tuner_class_name_dict.get(experiment_config['tuner']['builtinTunerName']) experiment_config['tuner']['className'] = tuner_class_name_dict.get(experiment_config['tuner']['builtinTunerName'])
if experiment_config['tuner']['classArgs'].get('optimize_mode') is None: experiment_config['tuner']['classArgs']['algorithm_name'] = tuner_algorithm_name_dict.get(experiment_config['tuner']['builtinTunerName'])
raise ValueError('Please set optimize_mode!')
if experiment_config['tuner']['classArgs']['optimize_mode'] not in ['maximize', 'minimize']:
raise ValueError('optimize_mode should be maximize or minimize')
if tuner_algorithm_name_dict.get(experiment_config['tuner']['builtinTunerName']):
experiment_config['tuner']['classArgs']['algorithm_name'] = tuner_algorithm_name_dict.get(experiment_config['tuner']['builtinTunerName'])
elif experiment_config['tuner'].get('codeDir') and experiment_config['tuner'].get('classFileName') and experiment_config['tuner'].get('className'): elif experiment_config['tuner'].get('codeDir') and experiment_config['tuner'].get('classFileName') and experiment_config['tuner'].get('className'):
if not os.path.exists(os.path.join(experiment_config['tuner']['codeDir'], experiment_config['tuner']['classFileName'])): if not os.path.exists(os.path.join(experiment_config['tuner']['codeDir'], experiment_config['tuner']['classFileName'])):
raise ValueError('Tuner file directory is not valid!') raise ValueError('Tuner file directory is not valid!')
else: else:
raise ValueError('Tuner format is not valid!') raise ValueError('Tuner format is not valid!')
if experiment_config['tuner'].get('gpuNum'): def parse_assessor_content(experiment_config):
check_digit(experiment_config['tuner'], 'gpuNum', 0, 100)
def validate_assessor_content(experiment_config):
'''Validate whether assessor in experiment_config is valid''' '''Validate whether assessor in experiment_config is valid'''
assessor_class_name_dict = {'Medianstop': 'MedianstopAssessor'} assessor_class_name_dict = {'Medianstop': 'MedianstopAssessor'}
if experiment_config.get('assessor'): if experiment_config.get('assessor'):
if (experiment_config['assessor'].get('builtinAssessorName') and \
(experiment_config['assessor'].get('codeDir') or experiment_config['assessor'].get('classFileName') or experiment_config['assessor'].get('className'))) or \
(experiment_config['assessor'].get('codeDir') and experiment_config['assessor'].get('classFileName') and experiment_config['assessor'].get('className') and \
experiment_config['assessor'].get('builtinAssessorName')):
raise ValueError('Please check assessor content!')
if experiment_config['assessor'].get('builtinAssessorName') and experiment_config['assessor'].get('classArgs'): if experiment_config['assessor'].get('builtinAssessorName') and experiment_config['assessor'].get('classArgs'):
if assessor_class_name_dict.get(experiment_config['assessor']['builtinAssessorName']) is None:
raise ValueError('Please set correct builtinAssessorName!')
experiment_config['assessor']['className'] = assessor_class_name_dict.get(experiment_config['assessor']['builtinAssessorName']) experiment_config['assessor']['className'] = assessor_class_name_dict.get(experiment_config['assessor']['builtinAssessorName'])
if experiment_config['assessor']['classArgs'].get('optimize_mode') is None: elif experiment_config['assessor'].get('codeDir') and experiment_config['assessor'].get('classFileName') and experiment_config['assessor'].get('className') and experiment_config['assessor'].get('classArgs'):
raise ValueError('Please set optimize_mode!')
if experiment_config['assessor']['classArgs']['optimize_mode'] not in ['maximize', 'minimize']:
raise ValueError('optimize_mode should be maximize or minimize')
elif experiment_config['assessor'].get('codeDir') and experiment_config['assessor'].get('classFileName') and experiment_config['assessor'].get('className'):
if not os.path.exists(os.path.join(experiment_config['assessor']['codeDir'], experiment_config['assessor']['classFileName'])): if not os.path.exists(os.path.join(experiment_config['assessor']['codeDir'], experiment_config['assessor']['classFileName'])):
raise ValueError('Assessor file directory is not valid!') raise ValueError('Assessor file directory is not valid!')
else: else:
raise ValueError('Assessor format is not valid!') raise ValueError('Assessor format is not valid!')
if experiment_config['assessor'].get('gpuNum'):
check_digit(experiment_config['assessor'], 'gpuNum', 0, 100)
def validate_trail_content(experiment_config):
    '''Validate whether trial in experiment_config is valid.

    Requires a non-empty 'trial' section with 'command' and an existing
    'codeDir'; normalizes codeDir in place and defaults gpuNum to 0.
    '''
    # NOTE(review): "trail" looks like a typo for "trial", but the name is
    # part of the public interface, so it is left unchanged.
    check_empty(experiment_config, 'trial')
    check_empty(experiment_config['trial'], 'command')
    check_empty(experiment_config['trial'], 'codeDir')
    check_directory(experiment_config['trial'], 'codeDir')
    # Normalize codeDir to an absolute path so later consumers need not
    # resolve it relative to the current working directory.
    experiment_config['trial']['codeDir'] = os.path.abspath(experiment_config['trial']['codeDir'])
    # gpuNum is optional; default to 0 GPUs when unset.
    if experiment_config['trial'].get('gpuNum') is None:
        experiment_config['trial']['gpuNum'] = 0
    else:
        check_digit(experiment_config['trial'], 'gpuNum', 0, 100)
def validate_machinelist_content(experiment_config):
    '''Validate whether machineList in experiment_config is valid.

    Normalizes each machine entry in place (default SSH port 22) and
    ensures every machine supplies exactly one authentication method:
    a password or an SSH key file.
    '''
    check_empty(experiment_config, 'machineList')
    for i, machine in enumerate(experiment_config['machineList']):
        check_empty(machine, 'ip')
        # Default to the standard SSH port when none is given.
        if machine.get('port') is None:
            experiment_config['machineList'][i]['port'] = 22
        else:
            check_digit(machine, 'port', 0, 65535)
        check_empty(machine, 'username')
        # At least one credential is required.
        if machine.get('passwd') is None and machine.get('sshKeyPath') is None:
            raise ValueError('Please set passwd or sshKeyPath for remote machine!')
        # A passphrase only makes sense together with an SSH key path.
        if machine.get('sshKeyPath') is None and machine.get('passphrase'):
            raise ValueError('Please set sshKeyPath!')
        if machine.get('sshKeyPath'):
            check_file(machine, 'sshKeyPath')
def validate_annotation_content(experiment_config): def validate_annotation_content(experiment_config):
'''Valid whether useAnnotation and searchSpacePath is coexist''' '''Valid whether useAnnotation and searchSpacePath is coexist'''
if experiment_config.get('useAnnotation'): if experiment_config.get('useAnnotation'):
if experiment_config.get('searchSpacePath'): if experiment_config.get('searchSpacePath'):
print('searchSpacePath', experiment_config.get('searchSpacePath'))
raise Exception('If you set useAnnotation=true, please leave searchSpacePath empty') raise Exception('If you set useAnnotation=true, please leave searchSpacePath empty')
else: else:
# validate searchSpaceFile # validate searchSpaceFile
if experiment_config['tuner'].get('tunerName') and experiment_config['tuner'].get('optimizationMode'): if experiment_config['tuner'].get('tunerName') and experiment_config['tuner'].get('optimizationMode'):
check_empty(experiment_config, 'searchSpacePath') if experiment_config.get('searchSpacePath') is None:
check_file(experiment_config, 'searchSpacePath') raise Exception('Please set searchSpace!')
validate_search_space_content(experiment_config) validate_search_space_content(experiment_config)
def validate_machine_list(experiment_config):
    '''Validate machine list.

    Raises:
        Exception: when the training platform is 'remote' but no
            machineList is configured.
    '''
    is_remote = experiment_config.get('trainingServicePlatform') == 'remote'
    has_machines = experiment_config.get('machineList') is not None
    if is_remote and not has_machines:
        raise Exception('Please set machineList!')
def validate_all_content(experiment_config, config_path): def validate_all_content(experiment_config, config_path):
'''Validate whether experiment_config is valid''' '''Validate whether experiment_config is valid'''
parse_path(experiment_config, config_path) parse_path(experiment_config, config_path)
validate_common_content(experiment_config) validate_common_content(experiment_config)
validate_tuner_content(experiment_config) parse_time(experiment_config)
validate_assessor_content(experiment_config) parse_tuner_content(experiment_config)
validate_trail_content(experiment_config) parse_assessor_content(experiment_config)
validate_annotation_content(experiment_config) validate_annotation_content(experiment_config)
if experiment_config['trainingServicePlatform'] == 'remote':
validate_machinelist_content(experiment_config)
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
import os import os
import psutil import psutil
import json import json
import datetime
from subprocess import call, check_output from subprocess import call, check_output
from .rest_utils import rest_get, rest_delete, check_rest_server_quick from .rest_utils import rest_get, rest_delete, check_rest_server_quick
from .config_utils import Config from .config_utils import Config
...@@ -30,6 +31,18 @@ import time ...@@ -30,6 +31,18 @@ import time
from .common_utils import print_normal, print_error, detect_process from .common_utils import print_normal, print_error, detect_process
from .webui_utils import stop_web_ui, check_web_ui, start_web_ui from .webui_utils import stop_web_ui, check_web_ui, start_web_ui
def convert_time_stamp_to_date(content):
'''Convert time stamp to date time format'''
start_time_stamp = content.get('startTime')
end_time_stamp = content.get('endTime')
if start_time_stamp:
start_time = datetime.datetime.utcfromtimestamp(start_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
content['startTime'] = str(start_time)
if end_time_stamp:
end_time = datetime.datetime.utcfromtimestamp(end_time_stamp // 1000).strftime("%Y/%m/%d %H:%M:%S")
content['endTime'] = str(end_time)
return content
def check_rest(args): def check_rest(args):
'''check if restful server is running''' '''check if restful server is running'''
nni_config = Config() nni_config = Config()
...@@ -72,7 +85,10 @@ def trial_ls(args): ...@@ -72,7 +85,10 @@ def trial_ls(args):
if check_rest_server_quick(rest_port): if check_rest_server_quick(rest_port):
response = rest_get(trial_jobs_url(rest_port), 20) response = rest_get(trial_jobs_url(rest_port), 20)
if response and response.status_code == 200: if response and response.status_code == 200:
print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) content = json.loads(response.text)
for index, value in enumerate(content):
content[index] = convert_time_stamp_to_date(value)
print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
else: else:
print_error('List trial failed...') print_error('List trial failed...')
else: else:
...@@ -106,7 +122,8 @@ def list_experiment(args): ...@@ -106,7 +122,8 @@ def list_experiment(args):
if check_rest_server_quick(rest_port): if check_rest_server_quick(rest_port):
response = rest_get(experiment_url(rest_port), 20) response = rest_get(experiment_url(rest_port), 20)
if response and response.status_code == 200: if response and response.status_code == 200:
print(json.dumps(json.loads(response.text), indent=4, sort_keys=True, separators=(',', ':'))) content = convert_time_stamp_to_date(json.loads(response.text))
print(json.dumps(content, indent=4, sort_keys=True, separators=(',', ':')))
else: else:
print_error('List experiment failed...') print_error('List experiment failed...')
else: else:
......
#!/bin/bash
python3 -m nnicmd.nnictl $@
...@@ -10,7 +10,8 @@ setuptools.setup( ...@@ -10,7 +10,8 @@ setuptools.setup(
'requests', 'requests',
'pyyaml', 'pyyaml',
'psutil', 'psutil',
'astor' 'astor',
'schema'
], ],
author = 'Microsoft NNI Team', author = 'Microsoft NNI Team',
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment