Unverified Commit 851955e6 authored by SparkSnail, committed by GitHub

NNICTL set classArgs as optional (#374)

In nnictl, classArgs is now optional, since some kinds of tuner and assessor do not require it.
parent 28e26ae9
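For context, the change lets an experiment config omit the classArgs section entirely. A minimal sketch of such a tuner section, assuming GridSearch as an example of a built-in tuner that takes no constructor arguments:

tuner:
  # no classArgs needed: GridSearch's __init__ no longer takes optimize_mode
  builtinTunerName: GridSearch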
 jobs:
+- job: 'Install_through_pip'
+  pool:
+    vmImage: 'Ubuntu 16.04'
+  strategy:
+    matrix:
+      Python36:
+        PYTHON_VERSION: '3.6'
+  steps:
+  - script: python3 -m pip install --upgrade pip setuptools
+    displayName: 'Install python tools'
+  - script: |
+      python3 -m pip install nni --user
+    displayName: 'Install nni toolkit via pip'
+  - script: |
+      cd test
+      PATH=$HOME/.local/bin:$PATH python3 naive_test.py
+    displayName: 'Integration tests'
+  - script: |
+      cd test
+      PATH=$HOME/.local/bin:$PATH python3 sdk_test.py
+    displayName: 'Built-in tuner tests'
 - job: 'Install_through_source_code'
   pool:
     vmImage: 'Ubuntu 16.04'
...
@@ -168,10 +168,10 @@ machineList:
 * __builtinTunerName__ and __classArgs__
   * __builtinTunerName__
-    __builtinTunerName__ specifies the name of system tuner you want to use, nni sdk provides four kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__}
+    __builtinTunerName__ specifies the name of system tuner you want to use, nni sdk provides six kinds of tuner, including {__TPE__, __Random__, __Anneal__, __Evolution__, __BatchTuner__, __GridSearch__}
   * __classArgs__
-    __classArgs__ specifies the arguments of tuner algorithm
+    __classArgs__ specifies the arguments of tuner algorithm. If the __builtinTunerName__ is in {__TPE__, __Random__, __Anneal__, __Evolution__}, you should set __optimize_mode__.
 * __codeDir__, __classFileName__, __className__ and __classArgs__
   * __codeDir__
...
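Per the documentation change above, tuners in the TPE family still take classArgs; a minimal sketch of such a section, with the value of optimize_mode chosen purely for illustration:

tuner:
  builtinTunerName: TPE
  classArgs:
    optimize_mode: maximize  # expected when builtinTunerName is TPE/Random/Anneal/Evolution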
@@ -47,7 +47,7 @@ class BatchTuner(Tuner):
     }
     '''
-    def __init__(self, optimize_mode):
+    def __init__(self):
         self.count = -1
         self.values = []
...
@@ -48,7 +48,7 @@ class GridSearchTuner(Tuner):
     and sample and then change the sampled value back.
     '''
-    def __init__(self, optimize_mode):
+    def __init__(self):
         self.count = -1
         self.expanded_search_space = []
...
@@ -29,7 +29,7 @@ GREEN = '\33[32m'
 RED = '\33[31m'
 CLEAR = '\33[0m'
-TUNER_LIST = ['BatchTuner', 'TPE', 'Random', 'Anneal', 'Evolution']
+TUNER_LIST = ['GridSearch', 'BatchTuner', 'TPE', 'Random', 'Anneal', 'Evolution']
 ASSESSOR_LIST = ['Medianstop']
 EXPERIMENT_URL = 'http://localhost:8080/api/v1/nni/experiment'
@@ -38,6 +38,11 @@ def switch(dispatch_type, dispatch_name):
     '''Change dispatch in config.yml'''
     config_path = 'sdk_test/local.yml'
     experiment_config = get_yml_content(config_path)
-    experiment_config[dispatch_type.lower()] = {
-        'builtin' + dispatch_type + 'Name': dispatch_name,
-        'classArgs': {
+    if dispatch_name in ['GridSearch', 'BatchTuner']:
+        experiment_config[dispatch_type.lower()] = {
+            'builtin' + dispatch_type + 'Name': dispatch_name
+        }
+    else:
+        experiment_config[dispatch_type.lower()] = {
+            'builtin' + dispatch_type + 'Name': dispatch_name,
+            'classArgs': {
...
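For illustration, the new branch in switch() writes one of two shapes into sdk_test/local.yml; the classArgs body is truncated in the hunk above, so optimize_mode and its value below are assumptions:

# after switch('Tuner', 'GridSearch')
tuner:
  builtinTunerName: GridSearch
---
# after switch('Tuner', 'TPE'); classArgs contents assumed
tuner:
  builtinTunerName: TPE
  classArgs:
    optimize_mode: maximize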
@@ -34,12 +34,14 @@ Optional('multiPhase'): bool,
 Optional('multiThread'): bool,
 'useAnnotation': bool,
 'tuner': Or({
-    'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch'),
-    'classArgs': {
-        'optimize_mode': Or('maximize', 'minimize'),
-        Optional('speed'): int
+    'builtinTunerName': Or('TPE', 'Random', 'Anneal', 'SMAC', 'Evolution'),
+    Optional('classArgs'): {
+        'optimize_mode': Or('maximize', 'minimize')
     },
     Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
+},{
+    'builtinTunerName': Or('BatchTuner', 'GridSearch'),
+    Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
 },{
     'codeDir': os.path.exists,
     'classFileName': str,
@@ -49,8 +51,10 @@ Optional('multiThread'): bool,
 }),
 Optional('assessor'): Or({
     'builtinAssessorName': lambda x: x in ['Medianstop'],
-    'classArgs': {
-        'optimize_mode': lambda x: x in ['maximize', 'minimize']},
+    Optional('classArgs'): {
+        Optional('optimize_mode'): Or('maximize', 'minimize'),
+        Optional('start_step'): And(int, lambda x: 0 <= x <= 9999)
+    },
     Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999)
 },{
     'codeDir': os.path.exists,
...
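Taken together, the revised schema accepts each of the following sections; a sketch with illustrative values. Note the new second tuner branch declares no classArgs key at all, so the schema library would reject a BatchTuner or GridSearch section that supplies one:

tuner:
  builtinTunerName: TPE  # classArgs is now optional here too
  gpuNum: 0
---
tuner:
  builtinTunerName: BatchTuner  # BatchTuner/GridSearch branch: no classArgs allowed
---
assessor:
  builtinAssessorName: Medianstop
  classArgs:
    start_step: 10  # new optional field; schema requires 0 <= start_step <= 9999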