Unverified Commit ef176d29 authored by SparkSnail, committed by GitHub

Merge pull request #116 from Microsoft/master

merge master
parents 97866505 4553de75
authorName: nni
experimentName: default_test
maxExecDuration: 5m
maxTrialNum: 4
trialConcurrency: 2
searchSpacePath: ../../../examples/trials/sklearn/regression/search_space.json
tuner:
  builtinTunerName: Random
  classArgs:
    optimize_mode: maximize
assessor:
  builtinAssessorName: Medianstop
  classArgs:
    optimize_mode: maximize
trial:
  codeDir: ../../../examples/trials/sklearn/regression
  command: python3 main.py
  gpuNum: 0
useAnnotation: false
multiPhase: false
multiThread: false
trainingServicePlatform: local
import time

import nni

if __name__ == '__main__':
    # Multi-phase trial: a single trial process requests a fresh parameter
    # set and reports a final result once per phase, five phases in total.
    for i in range(5):
        hyper_params = nni.get_next_parameter()
        nni.report_final_result(0.1 * i)
        time.sleep(3)
authorName: nni
experimentName: default_test
maxExecDuration: 5m
maxTrialNum: 2
trialConcurrency: 2
searchSpacePath: ./search_space.json
tuner:
  codeDir: ../../../src/sdk/pynni/tests
  classFileName: test_multi_phase_tuner.py
  className: NaiveMultiPhaseTuner
trial:
  codeDir: .
  command: python3 multi_phase.py
  gpuNum: 0
useAnnotation: false
multiPhase: true
multiThread: false
trainingServicePlatform: local
{
    "test": {
        "_type": "choice",
        "_value": [1, 100]
    }
}
authorName: nni
experimentName: default_test
maxExecDuration: 5m
maxTrialNum: 4
trialConcurrency: 2
searchSpacePath: ./search_space.json
tuner:
  codeDir: .
  classFileName: multi_thread_tuner.py
  className: MultiThreadTuner
trial:
  codeDir: .
  command: python3 multi_thread_trial.py
  gpuNum: 0
useAnnotation: false
multiPhase: false
multiThread: true
trainingServicePlatform: local
import nni
import time

if __name__ == '__main__':
    # Minimal trial for the multi-thread test: fetch parameters,
    # simulate a short workload, then report a single final result.
    nni.get_next_parameter()
    time.sleep(3)
    nni.report_final_result(0.5)
import time

from nni.tuner import Tuner


class MultiThreadTuner(Tuner):
    '''A tuner whose second parameter request blocks until the first
    trial reports its result; it only makes progress when the dispatcher
    serves requests on multiple threads.'''

    def __init__(self):
        self.parent_done = False

    def generate_parameters(self, parameter_id):
        if parameter_id == 0:
            return {'x': 0}
        else:
            # Block the child request until the parent trial reports back.
            while not self.parent_done:
                time.sleep(2)
            return {'x': 1}

    def receive_trial_result(self, parameter_id, parameters, value):
        if parameter_id == 0:
            self.parent_done = True

    def update_search_space(self, search_space):
        pass
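As an aside (not part of this commit): the tuner above is exactly why the config sets multiThread: true — the request for parameter_id 1 spins until the result for parameter_id 0 arrives, so a single-threaded dispatcher would deadlock. A minimal sketch of that interaction, using a hypothetical two-thread harness in place of the real NNI dispatcher:

import threading

# Hypothetical harness standing in for NNI's multi-thread dispatcher;
# with single-threaded dispatch the second request would never return.
tuner = MultiThreadTuner()
results = {}

def ask(pid):
    results[pid] = tuner.generate_parameters(pid)

child = threading.Thread(target=ask, args=(1,))
child.start()                                    # blocks inside generate_parameters(1)
ask(0)                                           # parent request returns {'x': 0} at once
tuner.receive_trial_result(0, results[0], 0.5)   # sets parent_done, unblocking the child
child.join()                                     # generate_parameters(1) now returns {'x': 1}
print(results)                                   # {0: {'x': 0}, 1: {'x': 1}}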
{
    "test": {
        "_type": "choice",
        "_value": [1, 100]
    }
}
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import subprocess
import time
import traceback
import json

import requests

from utils import get_experiment_status, get_yml_content, parse_max_duration_time, get_succeeded_trial_num
from utils import GREEN, RED, CLEAR, STATUS_URL, TRIAL_JOBS_URL, METRICS_URL


def run_test():
    '''run metrics test'''
    config_file = 'metrics_test/metrics.test.yml'

    print('Testing %s...' % config_file)
    proc = subprocess.run(['nnictl', 'create', '--config', config_file])
    assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode

    max_duration, max_trial_num = get_max_values(config_file)
    sleep_interval = 3

    for _ in range(0, max_duration, sleep_interval):
        time.sleep(sleep_interval)
        status = get_experiment_status(STATUS_URL)
        #print('experiment status:', status)
        if status == 'DONE':
            num_succeeded = get_succeeded_trial_num(TRIAL_JOBS_URL)
            assert num_succeeded == max_trial_num, 'only %d succeeded trial jobs, there should be %d' % (num_succeeded, max_trial_num)
            check_metrics()
            break

    assert status == 'DONE', 'Failed to finish in maxExecDuration'


def check_metrics():
    with open('metrics_test/expected_metrics.json', 'r') as f:
        expected_metrics = json.load(f)
    print(expected_metrics)
    metrics = requests.get(METRICS_URL).json()
    intermediate_result, final_result = get_metric_results(metrics)

    assert len(final_result) == 1, 'there should be 1 final result'
    assert final_result[0] == expected_metrics['final_result']
    assert set(intermediate_result) == set(expected_metrics['intermediate_result'])


def get_metric_results(metrics):
    intermediate_result = []
    final_result = []
    for metric in metrics:
        if metric['type'] == 'PERIODICAL':
            intermediate_result.append(metric['data'])
        elif metric['type'] == 'FINAL':
            final_result.append(metric['data'])
    print(intermediate_result, final_result)

    return [round(float(x), 6) for x in intermediate_result], [round(float(x), 6) for x in final_result]


def get_max_values(config_file):
    experiment_config = get_yml_content(config_file)
    return parse_max_duration_time(experiment_config['maxExecDuration']), experiment_config['maxTrialNum']


if __name__ == '__main__':
    try:
        # Sleep 5 seconds to give any previously stopped experiment
        # enough time to exit and release its port.
        time.sleep(5)
        run_test()
        print(GREEN + 'TEST PASS' + CLEAR)
    except Exception as error:
        print(RED + 'TEST FAIL' + CLEAR)
        print('%r' % error)
        traceback.print_exc()
        raise error
    finally:
        subprocess.run(['nnictl', 'stop'])
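The helpers imported from utils are not shown in this diff. As one illustrative assumption, parse_max_duration_time presumably converts a suffixed duration such as '5m' into seconds; a hedged sketch of what it might look like:

# Hypothetical sketch only -- the real helper lives in utils.py,
# which this commit does not touch.
def parse_max_duration_time(max_exec_duration):
    unit = max_exec_duration[-1]              # 's', 'm', 'h' or 'd'
    time_value = int(max_exec_duration[:-1])
    seconds_per_unit = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    return time_value * seconds_per_unit[unit]

assert parse_max_duration_time('5m') == 300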
{
    "intermediate_result": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    "final_result": 1.0
}
authorName: nni
experimentName: default_test
maxExecDuration: 3m
maxTrialNum: 1
trialConcurrency: 1
searchSpacePath: ./search_space.json
tuner:
  builtinTunerName: Random
  classArgs:
    optimize_mode: maximize
trial:
  codeDir: .
  command: python3 trial.py
  gpuNum: 0
useAnnotation: false
multiPhase: false
multiThread: false
trainingServicePlatform: local
{
    "test": {
        "_type": "choice",
        "_value": [1, 100]
    }
}
import time

import nni

if __name__ == '__main__':
    hyper_params = nni.get_next_parameter()
    # Report ten intermediate results (0.1 .. 1.0), then the final result;
    # these values must match metrics_test/expected_metrics.json.
    for i in range(10):
        nni.report_intermediate_result(0.1 * (i + 1))
        time.sleep(2)
    nni.report_final_result(1.0)
@@ -24,13 +24,8 @@ import sys
 import time
 import traceback

-from utils import check_experiment_status, fetch_nni_log_path, read_last_line, remove_files, setup_experiment
-
-GREEN = '\33[32m'
-RED = '\33[31m'
-CLEAR = '\33[0m'
-EXPERIMENT_URL = 'http://localhost:8080/api/v1/nni/experiment'
+from utils import is_experiment_done, fetch_nni_log_path, read_last_line, remove_files, setup_experiment
+from utils import GREEN, RED, CLEAR, EXPERIMENT_URL

 def run():
     '''run naive integration test'''
@@ -51,7 +46,7 @@ def run():
     tuner_status = read_last_line('naive_test/tuner_result.txt')
     assessor_status = read_last_line('naive_test/assessor_result.txt')

-    experiment_status = check_experiment_status(nnimanager_log_path)
+    experiment_status = is_experiment_done(nnimanager_log_path)

     assert tuner_status != 'ERROR', 'Tuner exited with error'
     assert assessor_status != 'ERROR', 'Assessor exited with error'
...
#!/bin/sh
python3 -m nni_cmd.nnictl "$@"
#!/bin/sh
cd ../../src/nni_manager && node dist/main.js "$@"
local:
  trainingServicePlatform: local
remote:
  trainingServicePlatform: remote
  machineList:
    - ip:
      port:
      username:
      passwd:
pai:
  trainingServicePlatform: pai
  paiConfig:
    userName:
    passWord:
    host:
  trial:
    gpuNum:
    cpuNum:
    memoryMB:
    image: msranni/latest
    dataDir:
    outputDir:
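This platform template pairs with the get_yml_content/dump_yml_content helpers used by the tests below. A minimal sketch, assuming those helpers are thin PyYAML wrappers and using illustrative file paths, of overlaying one platform section onto an experiment config:

import yaml

def get_yml_content(path):
    # Assumed behaviour: load a YAML file into a dict.
    with open(path) as f:
        return yaml.safe_load(f)

def dump_yml_content(path, content):
    # Assumed behaviour: write a dict back out as YAML.
    with open(path, 'w') as f:
        yaml.dump(content, f, default_flow_style=False)

# Merge the chosen platform's keys into a base experiment config.
platforms = get_yml_content('training_service.yml')
config = get_yml_content('tuner_test/local.yml')   # illustrative path
config.update(platforms['local'])
dump_yml_content('tuner_test/local.yml', config)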
@@ -23,7 +23,7 @@ import sys
 import time
 import traceback

-from utils import get_yml_content, dump_yml_content, setup_experiment, fetch_nni_log_path, check_experiment_status
+from utils import get_yml_content, dump_yml_content, setup_experiment, fetch_nni_log_path, is_experiment_done

 GREEN = '\33[32m'
 RED = '\33[31m'
@@ -36,7 +36,7 @@ EXPERIMENT_URL = 'http://localhost:8080/api/v1/nni/experiment'

 def switch(dispatch_type, dispatch_name):
     '''Change dispatch in config.yml'''
-    config_path = 'sdk_test/local.yml'
+    config_path = 'tuner_test/local.yml'
     experiment_config = get_yml_content(config_path)
     if dispatch_name in ['GridSearch', 'BatchTuner']:
         experiment_config[dispatch_type.lower()] = {
@@ -56,7 +56,7 @@ def test_builtin_dispatcher(dispatch_type, dispatch_name):
     switch(dispatch_type, dispatch_name)

     print('Testing %s...' % dispatch_name)
-    proc = subprocess.run(['nnictl', 'create', '--config', 'sdk_test/local.yml'])
+    proc = subprocess.run(['nnictl', 'create', '--config', 'tuner_test/local.yml'])
     assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode

     nnimanager_log_path = fetch_nni_log_path(EXPERIMENT_URL)
@@ -64,7 +64,7 @@ def test_builtin_dispatcher(dispatch_type, dispatch_name):
     for _ in range(20):
         time.sleep(3)
         # check if experiment is done
-        experiment_status = check_experiment_status(nnimanager_log_path)
+        experiment_status = is_experiment_done(nnimanager_log_path)
         if experiment_status:
             break
...