"test/vscode:/vscode.git/clone" did not exist on "05e81ed342cf52413bc16add68fa575c05bf4ba2"
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import os
from schema import Schema, And, Use, Optional, Regex, Or
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR


def setType(key, valueType):
    '''check that the value of `key` has type `valueType`'''
    return And(valueType, error=SCHEMA_TYPE_ERROR % (key, valueType.__name__))

def setChoice(key, *args):
    '''check that the value of `key` is one of the given choices'''
    return And(lambda n: n in args, error=SCHEMA_RANGE_ERROR % (key, str(args)))

def setNumberRange(key, keyType, start, end):
    '''check that the value of `key` is a `keyType` number within [start, end]'''
    return And(
        And(keyType, error=SCHEMA_TYPE_ERROR % (key, keyType.__name__)),
        And(lambda n: start <= n <= end, error=SCHEMA_RANGE_ERROR % (key, '[%s,%s]' % (start, end))),
    )

def setPathCheck(key):
    '''check that the value of `key` is an existing path'''
    return And(os.path.exists, error=SCHEMA_PATH_ERROR % key)
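
# Illustrative sketch (not executed here): each helper returns a `schema.And`
# validator, so they compose directly inside a Schema. For example:
#     Schema({'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999)}).validate(data)
# passes for data = {'trialConcurrency': 4} and raises SchemaError with the
# SCHEMA_RANGE_ERROR message for data = {'trialConcurrency': 0}.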

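# Fields shared by every training service platform: experiment metadata,
# concurrency limits, and the tuner/assessor/advisor sections.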
common_schema = {
    'authorName': setType('authorName', str),
    'experimentName': setType('experimentName', str),
    Optional('description'): setType('description', str),
    'trialConcurrency': setNumberRange('trialConcurrency', int, 1, 99999),
    Optional('maxExecDuration'): And(Regex(r'^[1-9][0-9]*[smhd]$', error='ERROR: maxExecDuration format is [digit]{s,m,h,d}')),
    Optional('maxTrialNum'): setNumberRange('maxTrialNum', int, 1, 99999),
    'trainingServicePlatform': setChoice('trainingServicePlatform', 'remote', 'local', 'pai', 'kubeflow', 'frameworkcontroller'),
    Optional('searchSpacePath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'searchSpacePath'),
    Optional('multiPhase'): setType('multiPhase', bool),
    Optional('multiThread'): setType('multiThread', bool),
    Optional('nniManagerIp'): setType('nniManagerIp', str),
    Optional('logDir'): And(os.path.isdir, error=SCHEMA_PATH_ERROR % 'logDir'),
    Optional('debug'): setType('debug', bool),
    Optional('versionCheck'): setType('versionCheck', bool),
    Optional('logLevel'): setChoice('logLevel', 'trace', 'debug', 'info', 'warning', 'error', 'fatal'),
    Optional('logCollection'): setChoice('logCollection', 'http', 'none'),
    'useAnnotation': setType('useAnnotation', bool),
    Optional('tuner'): dict,
    Optional('advisor'): dict,
    Optional('assessor'): dict,
    Optional('localConfig'): {
        Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndices format error!'),
        Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
        Optional('useActiveGpu'): setType('useActiveGpu', bool)
    }
}
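
# Per-tuner schemas for the `tuner` section, keyed by builtin tuner name
# (a tuple key covers tuners that accept the same arguments) or by
# 'customized' for a user-provided tuner class.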
tuner_schema_dict = {
    ('TPE', 'Anneal', 'SMAC'): {
        'builtinTunerName': setChoice('builtinTunerName', 'TPE', 'Anneal', 'SMAC'),
        Optional('classArgs'): {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'Evolution': {
        'builtinTunerName': setChoice('builtinTunerName', 'Evolution'),
        Optional('classArgs'): {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('population_size'): setNumberRange('population_size', int, 0, 99999),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    ('BatchTuner', 'GridSearch', 'Random'): {
        'builtinTunerName': setChoice('builtinTunerName', 'BatchTuner', 'GridSearch', 'Random'),
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'TPE': {
        'builtinTunerName': 'TPE',
        'classArgs': {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('parallel_optimize'): setType('parallel_optimize', bool),
            Optional('constant_liar_type'): setChoice('constant_liar_type', 'min', 'max', 'mean')
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'NetworkMorphism': {
        'builtinTunerName': 'NetworkMorphism',
        'classArgs': {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('task'): setChoice('task', 'cv', 'nlp', 'common'),
            Optional('input_width'): setType('input_width', int),
            Optional('input_channel'): setType('input_channel', int),
            Optional('n_output_node'): setType('n_output_node', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'MetisTuner': {
        'builtinTunerName': 'MetisTuner',
        'classArgs': {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('no_resampling'): setType('no_resampling', bool),
            Optional('no_candidates'): setType('no_candidates', bool),
            Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
            Optional('cold_start_num'): setType('cold_start_num', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'GPTuner': {
        'builtinTunerName': 'GPTuner',
        'classArgs': {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('utility'): setChoice('utility', 'ei', 'ucb', 'poi'),
            Optional('kappa'): setType('kappa', float),
            Optional('xi'): setType('xi', float),
            Optional('nu'): setType('nu', float),
            Optional('alpha'): setType('alpha', float),
            Optional('cold_start_num'): setType('cold_start_num', int),
            Optional('selection_num_warm_up'): setType('selection_num_warm_up', int),
            Optional('selection_num_starting_points'): setType('selection_num_starting_points', int),
        },
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'customized': {
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
        Optional('includeIntermediateResults'): setType('includeIntermediateResults', bool),
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    }
}

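# Per-advisor schemas for the `advisor` section: Hyperband, BOHB, or a
# customized advisor class.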
advisor_schema_dict = {
    'Hyperband':{
        'builtinAdvisorName': Or('Hyperband'),
        'classArgs': {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('R'): setType('R', int),
            Optional('eta'): setType('eta', int)
        },
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'BOHB':{
        'builtinAdvisorName': Or('BOHB'),
        'classArgs': {
            'optimize_mode': setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('min_budget'): setNumberRange('min_budget', int, 0, 9999),
            Optional('max_budget'): setNumberRange('max_budget', int, 0, 9999),
            Optional('eta'): setNumberRange('eta', int, 0, 9999),
            Optional('min_points_in_model'): setNumberRange('min_points_in_model', int, 0, 9999),
            Optional('top_n_percent'): setNumberRange('top_n_percent', int, 1, 99),
            Optional('num_samples'): setNumberRange('num_samples', int, 1, 9999),
            Optional('random_fraction'): setNumberRange('random_fraction', float, 0, 9999),
            Optional('bandwidth_factor'): setNumberRange('bandwidth_factor', float, 0, 9999),
            Optional('min_bandwidth'): setNumberRange('min_bandwidth', float, 0, 9999),
        },
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'customized':{
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    }
}

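# Per-assessor schemas for the `assessor` section: Medianstop, Curvefitting,
# or a customized assessor class.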
assessor_schema_dict = {
    'Medianstop': {
        'builtinAssessorName': 'Medianstop',
        Optional('classArgs'): {
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
        },
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'Curvefitting': {
        'builtinAssessorName': 'Curvefitting',
        Optional('classArgs'): {
            'epoch_num': setNumberRange('epoch_num', int, 0, 9999),
            Optional('optimize_mode'): setChoice('optimize_mode', 'maximize', 'minimize'),
            Optional('start_step'): setNumberRange('start_step', int, 0, 9999),
            Optional('threshold'): setNumberRange('threshold', float, 0, 9999),
            Optional('gap'): setNumberRange('gap', int, 1, 9999),
        },
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    },
    'customized': {
        'codeDir': setPathCheck('codeDir'),
        'classFileName': setType('classFileName', str),
        'className': setType('className', str),
        Optional('classArgs'): dict,
        Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999)
    }
}

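# `trial` section shared by the local and remote training services.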
common_trial_schema = {
'trial':{
    'command': setType('command', str),
    'codeDir': setPathCheck('codeDir'),
    Optional('gpuNum'): setNumberRange('gpuNum', int, 0, 99999),
    Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode')
    }
}

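# `trial` section for the OpenPAI training service: resource requests plus
# optional HDFS data/output locations.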
pai_trial_schema = {
'trial':{
    'command': setType('command', str),
    'codeDir': setPathCheck('codeDir'),
    'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
    'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
    'memoryMB': setType('memoryMB', int),
    'image': setType('image', str),
    Optional('authFile'): And(Regex(r'hdfs://(([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
                         error='ERROR: authFile format error, authFile format is hdfs://xxx.xxx.xxx.xxx:xxx'),
    Optional('shmMB'): setType('shmMB', int),
    Optional('dataDir'): And(Regex(r'hdfs://(([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
                         error='ERROR: dataDir format error, dataDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
    Optional('outputDir'): And(Regex(r'hdfs://(([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]{2,5})?(/.*)?'),\
                         error='ERROR: outputDir format error, outputDir format is hdfs://xxx.xxx.xxx.xxx:xxx'),
    Optional('virtualCluster'): setType('virtualCluster', str),
    Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode')
    }
}

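# `paiConfig` section: credentials and host of the OpenPAI cluster.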
pai_config_schema = {
    'paiConfig':{
        'userName': setType('userName', str),
        'passWord': setType('passWord', str),
        'host': setType('host', str)
    }
}

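# `trial` section for the Kubeflow training service: optional ps/master/worker
# roles, each with replica count, command, resources, and container image.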
kubeflow_trial_schema = {
'trial':{
        'codeDir': setPathCheck('codeDir'),
        Optional('nasMode'): setChoice('nasMode', 'classic_mode', 'enas_mode', 'oneshot_mode'),
        Optional('ps'): {
            'replicas': setType('replicas', int),
            'command': setType('command', str),
            'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str),
            Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
        },
        Optional('master'): {
            'replicas': setType('replicas', int),
            'command': setType('command', str),
            'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str),
            Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
        },
        Optional('worker'):{
            'replicas': setType('replicas', int),
            'command': setType('command', str),
            'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str),
            Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
        }
    }
}

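# `kubeflowConfig` section: either an NFS-backed or an Azure-storage-backed
# setup, hence the Or(...) over two alternative shapes.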
kubeflow_config_schema = {
    'kubeflowConfig':Or({
        'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
        'apiVersion': setType('apiVersion', str),
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        'nfs': {
            'server': setType('server', str),
            'path': setType('path', str)
        }
    },{
        'operator': setChoice('operator', 'tf-operator', 'pytorch-operator'),
        'apiVersion': setType('apiVersion', str),
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        'keyVault': {
            'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                         error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
            'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                    error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
        },
        'azureStorage': {
            'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
                           error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
            'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
                          error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
        }
    })
}

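# `trial` section for the FrameworkController training service: a list of task
# roles, each with its own completion policy, command, resources, and image.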
frameworkcontroller_trial_schema = {
    'trial':{
        'codeDir': setPathCheck('codeDir'),
        'taskRoles': [{
            'name': setType('name', str),
            'taskNum': setType('taskNum', int),
            'frameworkAttemptCompletionPolicy': {
                'minFailedTaskCount': setType('minFailedTaskCount', int),
                'minSucceededTaskCount': setType('minSucceededTaskCount', int),
            },
            'command': setType('command', str),
            'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
            'cpuNum': setNumberRange('cpuNum', int, 0, 99999),
            'memoryMB': setType('memoryMB', int),
            'image': setType('image', str),
            Optional('privateRegistryAuthPath'): And(os.path.exists, error=SCHEMA_PATH_ERROR % 'privateRegistryAuthPath')
        }]
    }
}

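# `frameworkcontrollerConfig` section: NFS-backed or Azure-storage-backed,
# mirroring the two kubeflowConfig variants above.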
frameworkcontroller_config_schema = {
    'frameworkcontrollerConfig':Or({
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        Optional('serviceAccountName'): setType('serviceAccountName', str),
        'nfs': {
            'server': setType('server', str),
            'path': setType('path', str)
        }
    },{
        Optional('storage'): setChoice('storage', 'nfs', 'azureStorage'),
        Optional('serviceAccountName'): setType('serviceAccountName', str),
        'keyVault': {
            'vaultName': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                         error='ERROR: vaultName format error, vaultName support using (0-9|a-z|A-Z|-)'),
            'name': And(Regex('([0-9]|[a-z]|[A-Z]|-){1,127}'),\
                    error='ERROR: name format error, name support using (0-9|a-z|A-Z|-)')
        },
        'azureStorage': {
            'accountName': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,31}'),\
                           error='ERROR: accountName format error, accountName support using (0-9|a-z|A-Z|-)'),
            'azureShare': And(Regex('([0-9]|[a-z]|[A-Z]|-){3,63}'),\
                          error='ERROR: azureShare format error, azureShare support using (0-9|a-z|A-Z|-)')
        }
    })
}

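# `machineList` section for the remote training service: each entry uses either
# password or SSH-key authentication, hence the Or(...) over two shapes.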
machine_list_schema = {
Optional('machineList'):[Or({
    'ip': setType('ip', str),
    Optional('port'): setNumberRange('port', int, 1, 65535),
    'username': setType('username', str),
    'passwd': setType('passwd', str),
    Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndices format error!'),
    Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
    Optional('useActiveGpu'): setType('useActiveGpu', bool)
    },{
    'ip': setType('ip', str),
    Optional('port'): setNumberRange('port', int, 1, 65535),
    'username': setType('username', str),
    'sshKeyPath': setPathCheck('sshKeyPath'),
    Optional('passphrase'): setType('passphrase', str),
    Optional('gpuIndices'): Or(int, And(str, lambda x: len([int(i) for i in x.split(',')]) > 0), error='gpuIndices format error!'),
    Optional('maxTrialNumPerGpu'): setType('maxTrialNumPerGpu', int),
    Optional('useActiveGpu'): setType('useActiveGpu', bool)
})]
}

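# Top-level schemas, one per training service platform, composed from the
# shared and platform-specific pieces above.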
LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})

REMOTE_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema, **machine_list_schema})

PAI_CONFIG_SCHEMA = Schema({**common_schema, **pai_trial_schema, **pai_config_schema})

KUBEFLOW_CONFIG_SCHEMA = Schema({**common_schema, **kubeflow_trial_schema, **kubeflow_config_schema})

FRAMEWORKCONTROLLER_CONFIG_SCHEMA = Schema({**common_schema, **frameworkcontroller_trial_schema, **frameworkcontroller_config_schema})
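

# Illustrative usage (a sketch; the actual call sites live elsewhere in the
# project, and the YAML loader shown here is an assumption):
#     import yaml
#     with open('config.yml') as config_file:
#         experiment_config = yaml.safe_load(config_file)
#     LOCAL_CONFIG_SCHEMA.validate(experiment_config)  # raises SchemaError if invalid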