Commit 593a275c authored by Yuge Zhang

Merge branch 'master' of https://github.com/microsoft/nni into dev-retiarii

parents b3cdee85 683c458a
# Dockerfile for building AdaptDL-enabled CIFAR10 image
# Set docker build context to current folder
FROM pytorch/pytorch:1.4-cuda10.1-cudnn7-runtime
RUN pip install nni adaptdl tensorboard
COPY ./ /cifar10
......@@ -17,10 +17,11 @@ tuner:
#choice: maximize, minimize
optimize_mode: maximize
trial:
namespace: default
command: python3 /cifar10/main_adl.py
codeDir: /cifar10
gpuNum: 1
image: {replace_with_the_image_that_has_adaptdl_installed}
image: {image_built_by_adl.Dockerfile}
# optional
imagePullSecrets:
- name: {secret}
......
algoType: tuner
builtinName: demotuner
className: demo_tuner.DemoTuner
classArgsValidator: demo_tuner.MyClassArgsValidator
......@@ -12,10 +12,8 @@ setuptools.setup(
classifiers = [
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: ',
'NNI Package :: tuner :: demotuner :: demo_tuner.DemoTuner :: demo_tuner.MyClassArgsValidator'
'Operating System :: '
],
author = 'Microsoft NNI Team',
author_email = 'nni@microsoft.com',
description = 'NNI control for Neural Network Intelligence project',
......
......@@ -169,7 +169,7 @@ class GridSearchTuner(Tuner):
"""
self.count += 1
while self.count <= len(self.expanded_search_space) - 1:
_params_tuple = convert_dict2tuple(self.expanded_search_space[self.count])
_params_tuple = convert_dict2tuple(copy.deepcopy(self.expanded_search_space[self.count]))
if _params_tuple in self.supplement_data:
self.count += 1
else:
......@@ -203,6 +203,6 @@ class GridSearchTuner(Tuner):
if not _value:
logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue
_params_tuple = convert_dict2tuple(_params)
_params_tuple = convert_dict2tuple(copy.deepcopy(_params))
self.supplement_data[_params_tuple] = True
logger.info("Successfully import data to grid search tuner.")
from .model import NlpTrialStats, NlpIntermediateStats, NlpTrialConfig
from .query import query_nlp_trial_stats
import json
import os
import argparse
import tqdm
from .model import db, NlpTrialConfig, NlpTrialStats, NlpIntermediateStats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='Path to extracted NLP data dir.')
args = parser.parse_args()
with db, tqdm.tqdm(total=len(os.listdir(args.input_dir)), desc="creating tables") as pbar:
db.create_tables([NlpTrialConfig, NlpTrialStats, NlpIntermediateStats])
json_files = os.listdir(args.input_dir)
for json_file in json_files:
pbar.update(1)
if json_file.endswith('.json'):
log_path = os.path.join(args.input_dir, json_file)
cur = json.load(open(log_path, 'r'))
arch = json.loads(cur['recepie'])
unested_arch = {}
for k in arch.keys():
# print(k)
unested_arch['{}_op'.format(k)] = arch[k]['op']
for i in range(len(arch[k]['input'])):
unested_arch['{}_input_{}'.format(k, i)] = arch[k]['input'][i]
config = NlpTrialConfig.create(arch=unested_arch, dataset=cur['data'][5:])
if cur['status'] == 'OK':
trial_stats = NlpTrialStats.create(config=config, train_loss=cur['train_losses'][-1], val_loss=cur['val_losses'][-1],
test_loss=cur['test_losses'][-1], training_time=cur['wall_times'][-1])
epochs = 50
intermediate_stats = []
for epoch in range(epochs):
epoch_res = {
'train_loss' : cur['train_losses'][epoch],
'val_loss' : cur['val_losses'][epoch],
'test_loss' : cur['test_losses'][epoch],
'training_time' : cur['wall_times'][epoch]
}
epoch_res.update(current_epoch=epoch + 1, trial=trial_stats)
intermediate_stats.append(epoch_res)
NlpIntermediateStats.insert_many(intermediate_stats).execute(db)
if __name__ == '__main__':
main()
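A worked example (with a hypothetical two-node recepie) of the flattening loop above, which turns the nested arch into the flat ``{node}_op`` / ``{node}_input_{i}`` keys stored in ``NlpTrialConfig``:

```python
arch = {
    "node_2": {"op": "linear", "input": ["x", "h_prev_0"]},
    "h_new_0": {"op": "activation_tanh", "input": ["node_2"]},
}
unested_arch = {}
for k in arch:
    unested_arch['{}_op'.format(k)] = arch[k]['op']
    for i, inp in enumerate(arch[k]['input']):
        unested_arch['{}_input_{}'.format(k, i)] = inp
# unested_arch == {'node_2_op': 'linear', 'node_2_input_0': 'x',
#                  'node_2_input_1': 'h_prev_0',
#                  'h_new_0_op': 'activation_tanh', 'h_new_0_input_0': 'node_2'}
```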
import os
from peewee import CharField, FloatField, ForeignKeyField, IntegerField, Model
from playhouse.sqlite_ext import JSONField, SqliteExtDatabase
from nni.nas.benchmarks.utils import json_dumps
from nni.nas.benchmarks.constants import DATABASE_DIR
db = SqliteExtDatabase(os.path.join(DATABASE_DIR, 'nlp.db'), autoconnect=True)
class NlpTrialConfig(Model):
"""
Trial config for NLP. epoch_num is fixed at 50.
Attributes
----------
arch: dict
aka recepie in NAS-NLP-Benchmark repo (https://github.com/fmsnew/nas-bench-nlp-release).
an arch consists of multiple ``Node``, ``Node_input_n`` and ``Node_op`` entries.
``Node`` can be ``node_n`` or ``h_new_n`` or ``f/i/o/j(_act)`` etc. (``n`` is an integer and need not be consecutive)
``Node_input_n`` can be ``Node`` or ``x`` etc.
``Node_op`` can be ``linear`` or ``activation_sigm`` or ``activation_tanh`` or ``elementwise_prod``
or ``elementwise_sum`` or ``activation_leaky_relu`` ...
e.g., {"h_new_0_input_0":"node_3","h_new_0_input_1":"x","h_new_0_op":"linear","node_2_input_0":"x",
"node_2_input_1":"h_prev_0","node_2_op":"linear","node_3_input_0":"node_2","node_3_op":"activation_leaky_relu"}
dataset: str
Dataset used. Could be ``ptb`` or ``wikitext-2``.
"""
arch = JSONField(json_dumps=json_dumps, index=True)
dataset = CharField(max_length=15, index=True, choices=[
'ptb',
'wikitext-2'
])
class Meta:
database = db
class NlpTrialStats(Model):
"""
Computation statistics for NAS-NLP-Benchmark.
Each record corresponds to one trial result after 50 epochs.
Attributes
----------
config : NlpTrialConfig
Corresponding config for trial.
train_loss : float or None
Final loss on training data. Could be NaN (None).
val_loss : float or None
Final loss on validation data. Could be NaN (None).
test_loss : float or None
Final loss on test data. Could be NaN (None).
training_time : float
Time elapsed in seconds, aka wall_time in the NAS-NLP-Benchmark repo.
"""
config = ForeignKeyField(NlpTrialConfig, backref='trial_stats', index=True)
train_loss = FloatField(null=True)
val_loss = FloatField(null=True)
test_loss = FloatField(null=True)
training_time = FloatField(null=True)
class Meta:
database = db
class NlpIntermediateStats(Model):
"""
Computation statistics for NAS-NLP-Benchmark.
Each record corresponds to one intermediate result during epochs 1-50.
Attributes
----------
config : NlpTrialConfig
Corresponding config for trial.
train_loss : float or None
Loss on training data at the current epoch. Could be NaN (None).
val_loss : float or None
Loss on validation data at the current epoch. Could be NaN (None).
test_loss : float or None
Loss on test data at the current epoch. Could be NaN (None).
training_time : float
Time elapsed in seconds, aka wall_time in the NAS-NLP-Benchmark repo.
"""
trial = ForeignKeyField(NlpTrialStats, backref='intermediates', index=True)
current_epoch = IntegerField(index=True)
train_loss = FloatField(null=True)
val_loss = FloatField(null=True)
test_loss = FloatField(null=True)
training_time = FloatField(null=True)
class Meta:
database = db
\ No newline at end of file
import functools
from peewee import fn
from playhouse.shortcuts import model_to_dict
from .model import NlpTrialStats, NlpTrialConfig
def query_nlp_trial_stats(arch, dataset, reduction=None, include_intermediates=False):
"""
Query trial stats of the NLP benchmark given conditions, including config (arch + dataset) and training results after 50 epochs.
Parameters
----------
arch : dict or None
If a dict, it is in the format that is described in
:class:`nni.nas.benchmark.nlp.NlpTrialConfig`. Only trial stats matched will be returned.
If none, all architectures in the database will be matched.
dataset : str or None
If specified, can be one of the datasets available in :class:`nni.nas.benchmark.nlp.NlpTrialConfig`.
Otherwise a wildcard.
reduction : str or None
If 'none' or None, all trial stats will be returned directly.
If 'mean', fields in trial stats will be averaged given the same trial config.
Please note that some trial configs have multiple runs, which makes "reduction" meaningful, while others do not.
include_intermediates : boolean
If true, intermediate results will be returned.
Returns
-------
generator of dict
A generator of :class:`nni.nas.benchmark.nlp.NlpTrialStats` objects,
where each of them has been converted into a dict.
"""
fields = []
if reduction == 'none':
reduction = None
if reduction == 'mean':
for field_name in NlpTrialStats._meta.sorted_field_names:
if field_name not in ['id', 'config']:
fields.append(fn.AVG(getattr(NlpTrialStats, field_name)).alias(field_name))
elif reduction is None:
fields.append(NlpTrialStats)
else:
raise ValueError('Unsupported reduction: \'%s\'' % reduction)
query = NlpTrialStats.select(*fields, NlpTrialConfig).join(NlpTrialConfig)
conditions = []
if arch is not None:
conditions.append(NlpTrialConfig.arch == arch)
if dataset is not None:
conditions.append(NlpTrialConfig.dataset == dataset)
if conditions:
query = query.where(functools.reduce(lambda a, b: a & b, conditions))
for trial in query:
if include_intermediates:
data = model_to_dict(trial)
# exclude 'trial' from intermediates as it is already available in data
data['intermediates'] = [
{k: v for k, v in model_to_dict(t).items() if k != 'trial'} for t in trial.intermediates
]
yield data
else:
yield model_to_dict(trial)
\ No newline at end of file
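A usage sketch of the query API above (the arch dict is the example from the ``NlpTrialConfig`` docstring; this assumes the package path ``nni.nas.benchmarks.nlp`` and that the nlp benchmark database has been generated or downloaded into ``DATABASE_DIR``):

```python
from nni.nas.benchmarks.nlp import query_nlp_trial_stats

arch = {"h_new_0_input_0": "node_3", "h_new_0_input_1": "x",
        "h_new_0_op": "linear", "node_2_input_0": "x",
        "node_2_input_1": "h_prev_0", "node_2_op": "linear",
        "node_3_input_0": "node_2", "node_3_op": "activation_leaky_relu"}

for stats in query_nlp_trial_stats(arch=arch, dataset='ptb', include_intermediates=True):
    print(stats['val_loss'], len(stats['intermediates']))
```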
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import importlib
import json
from nni.tools.package_utils import read_registerd_algo_meta, get_registered_algo_meta, \
write_registered_algo_meta, ALGO_TYPES, parse_full_class_name
from .common_utils import print_error, print_green, get_yml_content
def read_reg_meta_list(meta_path):
content = get_yml_content(meta_path)
if content.get('algorithms'):
meta_list = content.get('algorithms')
else:
meta_list = [content]
for meta in meta_list:
assert 'algoType' in meta
assert meta['algoType'] in ['tuner', 'assessor', 'advisor']
assert 'builtinName' in meta
assert 'className' in meta
return meta_list
def verify_algo_import(meta):
def _do_verify_import(fullName):
module_name, class_name = parse_full_class_name(fullName)
class_module = importlib.import_module(module_name)
getattr(class_module, class_name)
_do_verify_import(meta['className'])
if meta.get('classArgsValidator'):
_do_verify_import(meta['classArgsValidator'])
def algo_reg(args):
meta_list = read_reg_meta_list(args.meta_path)
for meta in meta_list:
if get_registered_algo_meta(meta['builtinName']) is not None:
print_error('builtinName {} already registered'.format(meta['builtinName']))
return
verify_algo_import(meta)
save_algo_meta_data(meta)
print_green('{} registered successfully!'.format(meta['builtinName']))
def algo_unreg(args):
name = args.name[0]
meta = get_registered_algo_meta(name)
if meta is None:
print_error('builtin algorithm {} not found!'.format(name))
return
if meta['source'] == 'nni':
print_error('{} is provided by nni and cannot be unregistered!'.format(name))
return
if remove_algo_meta_data(name):
print_green('{} unregistered successfully!'.format(name))
else:
print_error('Failed to unregister {}!'.format(name))
def algo_show(args):
builtin_name = args.name[0]
meta = get_registered_algo_meta(builtin_name)
if meta:
print(json.dumps(meta, indent=4))
else:
print_error('package {} not found'.format(builtin_name))
def algo_list(args):
meta = read_registerd_algo_meta()
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
print('| Name | Type | Source | Class Name | Module Name |')
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
MAX_MODULE_NAME = 38
for t in ['tuners', 'assessors', 'advisors']:
for p in meta[t]:
module_name = '.'.join(p['className'].split('.')[:-1])
if len(module_name) > MAX_MODULE_NAME:
module_name = module_name[:MAX_MODULE_NAME-3] + '...'
class_name = p['className'].split('.')[-1]
print('| {:15s} | {:10s} | {:9s} | {:20s} | {:40s} |'.format(p['builtinName'], t, p['source'], class_name, module_name[:38]))
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
def save_algo_meta_data(meta_data):
meta_data['source'] = 'user'
config = read_registerd_algo_meta()
config[meta_data['algoType']+'s'].append(meta_data)
write_registered_algo_meta(config)
def remove_algo_meta_data(name):
config = read_registerd_algo_meta()
updated = False
for t in ALGO_TYPES:
for meta in config[t]:
if meta['builtinName'] == name:
config[t].remove(meta)
updated = True
if updated:
write_registered_algo_meta(config)
return True
return False
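To make the registration flow above concrete: for the demotuner meta file shown earlier in this commit, ``read_reg_meta_list`` yields a dict like the following (a sketch; the keys mirror the asserts in ``read_reg_meta_list``):

```python
meta = {
    'algoType': 'tuner',
    'builtinName': 'demotuner',
    'className': 'demo_tuner.DemoTuner',
    'classArgsValidator': 'demo_tuner.MyClassArgsValidator',
}
verify_algo_import(meta)   # imports demo_tuner, resolves DemoTuner and
                           # MyClassArgsValidator, raising on failure
save_algo_meta_data(meta)  # tags meta with source='user' and appends it
                           # to the 'tuners' section of the registry file
```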
......@@ -6,7 +6,7 @@ import logging
import os
import netifaces
from schema import Schema, And, Optional, Regex, Or, SchemaError
from nni.tools.package_utils import create_validator_instance, get_all_builtin_names, get_builtin_algo_meta
from nni.tools.package_utils import create_validator_instance, get_all_builtin_names, get_registered_algo_meta
from .constants import SCHEMA_TYPE_ERROR, SCHEMA_RANGE_ERROR, SCHEMA_PATH_ERROR
from .common_utils import get_yml_content, print_warning
......@@ -75,8 +75,8 @@ class AlgoSchema:
def validate_class_args(self, class_args, algo_type, builtin_name):
if not builtin_name or not class_args:
return
meta = get_builtin_algo_meta(algo_type+'s', builtin_name)
if meta and 'accept_class_args' in meta and meta['accept_class_args'] == False:
meta = get_registered_algo_meta(builtin_name, algo_type+'s')
if meta and 'acceptClassArgs' in meta and meta['acceptClassArgs'] == False:
raise SchemaError('classArgs is not allowed.')
logging.getLogger('nni.protocol').setLevel(logging.ERROR) # we know IPC is not there, don't complain
......@@ -268,6 +268,7 @@ adl_trial_schema = {
'command': setType('command', str),
'gpuNum': setNumberRange('gpuNum', int, 0, 99999),
'image': setType('image', str),
Optional('namespace'): setType('namespace', str),
Optional('imagePullSecrets'): [{
'name': setType('name', str)
}],
......
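A standalone sketch (using the ``schema`` package directly; ``setType`` in the real code wraps a similar type check with NNI's error messages) of how the newly added optional ``namespace`` key validates; the image name is illustrative:

```python
from schema import Schema, Optional

adl_trial = Schema({
    'command': str,
    'gpuNum': int,
    'image': str,
    Optional('namespace'): str,
}, ignore_extra_keys=True)

adl_trial.validate({'command': 'python3 /cifar10/main_adl.py', 'gpuNum': 1,
                    'image': 'example/adaptdl-cifar10'})  # namespace may be omitted
adl_trial.validate({'command': 'python3 /cifar10/main_adl.py', 'gpuNum': 1,
                    'image': 'example/adaptdl-cifar10', 'namespace': 'default'})
```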
......@@ -61,27 +61,6 @@ TRIAL_MONITOR_CONTENT = '%-15s %-25s %-25s %-15s'
TRIAL_MONITOR_TAIL = '-------------------------------------------------------------------------------------\n\n\n'
INSTALLABLE_PACKAGE_META = {
'SMAC': {
'type': 'tuner',
'class_name': 'nni.algorithms.hpo.smac_tuner.smac_tuner.SMACTuner',
'code_sub_dir': 'smac_tuner',
'class_args_validator': 'nni.algorithms.hpo.smac_tuner.smac_tuner.SMACClassArgsValidator'
},
'BOHB': {
'type': 'advisor',
'class_name': 'nni.algorithms.hpo.bohb_advisor.bohb_advisor.BOHB',
'code_sub_dir': 'bohb_advisor',
'class_args_validator': 'nni.algorithms.hpo.bohb_advisor.bohb_advisor.BOHBClassArgsValidator'
},
'PPOTuner': {
'type': 'tuner',
'class_name': 'nni.algorithms.hpo.ppo_tuner.ppo_tuner.PPOTuner',
'code_sub_dir': 'ppo_tuner',
'class_args_validator': 'nni.algorithms.hpo.ppo_tuner.ppo_tuner.PPOClassArgsValidator'
}
}
TUNERS_SUPPORTING_IMPORT_DATA = {
'TPE',
'Anneal',
......
......@@ -19,7 +19,7 @@ from .config_utils import Config, Experiments
from .common_utils import get_yml_content, get_json_content, print_error, print_normal, \
detect_port, get_user
from .constants import NNICTL_HOME_DIR, ERROR_INFO, REST_TIME_OUT, EXPERIMENT_SUCCESS_INFO, LOG_HEADER, INSTALLABLE_PACKAGE_META
from .constants import NNICTL_HOME_DIR, ERROR_INFO, REST_TIME_OUT, EXPERIMENT_SUCCESS_INFO, LOG_HEADER
from .command_utils import check_output_command, kill_command
from .nnictl_utils import update_experiment
......@@ -452,10 +452,9 @@ def launch_experiment(args, experiment_config, mode, experiment_id):
except CalledProcessError:
print_error('some error occurred when importing package %s.' % (package_name))
print_log_content(experiment_id)
if package_name in INSTALLABLE_PACKAGE_META:
print_error('If %s is not installed, it should be installed through '\
'\'nnictl package install --name %s\'' % (package_name, package_name))
exit(1)
if package_name in ['SMAC', 'BOHB', 'PPOTuner']:
print_error(f'The dependencies for {package_name} can be installed through pip install nni[{package_name}]')
raise
log_dir = experiment_config['logDir'] if experiment_config.get('logDir') else None
log_level = experiment_config['logLevel'] if experiment_config.get('logLevel') else None
#view experiment mode do not need debug function, when view an experiment, there will be no new logs created
......
......@@ -13,7 +13,7 @@ from .nnictl_utils import stop_experiment, trial_ls, trial_kill, list_experiment
monitor_experiment, export_trials_data, trial_codegen, webui_url, \
get_config, log_stdout, log_stderr, search_space_auto_gen, webui_nas, \
save_experiment, load_experiment
from .package_management import package_install, package_uninstall, package_show, package_list
from .algo_management import algo_reg, algo_unreg, algo_show, algo_list
from .constants import DEFAULT_REST_PORT
from .tensorboard_utils import start_tensorboard, stop_tensorboard
init(autoreset=True)
......@@ -212,26 +212,43 @@ def parse_args():
parser_log_trial.add_argument('--trial_id', '-T', dest='trial_id', help='find trial log path by id')
parser_log_trial.set_defaults(func=log_trial)
#parse package command
parser_package = subparsers.add_parser('package', help='control nni tuner and assessor packages')
# add subparsers for parser_package
parser_package_subparsers = parser_package.add_subparsers()
parser_package_install = parser_package_subparsers.add_parser('install', help='install packages')
parser_package_install.add_argument('source', nargs='?', help='installation source, can be a directory or whl file')
parser_package_install.add_argument('--name', '-n', dest='name', help='package name to be installed', required=False)
parser_package_install.set_defaults(func=package_install)
#parse algo command
parser_algo = subparsers.add_parser('algo', help='control nni builtin tuner, assessor and advisor algorithms')
# add subparsers for parser_algo
parser_algo_subparsers = parser_algo.add_subparsers()
parser_algo_reg = parser_algo_subparsers.add_parser(
'register',
aliases=('reg',),
help='''register algorithms as nni builtin algorithms, for example:
nnictl algo reg --meta_path <path_to_meta_file>
where <path_to_meta_file> is the path to a meta file in yml format;
see the NNI documentation and the examples/tuners/customized_tuner example
for the format of the yml file.'''
)
parser_algo_reg.add_argument('--meta_path', '-m', dest='meta_path', help='path to the meta file', required=True)
parser_algo_reg.set_defaults(func=algo_reg)
parser_package_uninstall = parser_package_subparsers.add_parser('uninstall', help='uninstall packages')
parser_package_uninstall.add_argument('name', nargs=1, help='package name to be uninstalled')
parser_package_uninstall.set_defaults(func=package_uninstall)
parser_algo_unreg = parser_algo_subparsers.add_parser('unregister', aliases=('unreg',), help='unregister algorithm')
parser_algo_unreg.add_argument('name', nargs=1, help='builtin name of the algorithm')
parser_algo_unreg.set_defaults(func=algo_unreg)
parser_package_show = parser_package_subparsers.add_parser('show', help='show the information of packages')
parser_package_show.add_argument('name', nargs=1, help='builtin name of the package')
parser_package_show.set_defaults(func=package_show)
parser_algo_show = parser_algo_subparsers.add_parser('show', help='show the information of algorithm')
parser_algo_show.add_argument('name', nargs=1, help='builtin name of the algorithm')
parser_algo_show.set_defaults(func=algo_show)
parser_package_list = parser_package_subparsers.add_parser('list', help='list installed packages')
parser_package_list.add_argument('--all', action='store_true', help='list all builtin packages')
parser_package_list.set_defaults(func=package_list)
parser_algo_list = parser_algo_subparsers.add_parser('list', help='list registered algorithms')
parser_algo_list.set_defaults(func=algo_list)
# Show a message that the nnictl package command has been replaced by nnictl algo; to be removed in a future release.
def show_message_for_nnictl_package(args):
print_error('the nnictl package command has been replaced by nnictl algo, please run nnictl algo -h to show the usage')
parser_package_subparsers = subparsers.add_parser('package', help='control nni tuner and assessor packages').add_subparsers()
parser_package_subparsers.add_parser('install', help='install packages').set_defaults(func=show_message_for_nnictl_package)
parser_package_subparsers.add_parser('uninstall', help='uninstall packages').set_defaults(func=show_message_for_nnictl_package)
parser_package_subparsers.add_parser('show', help='show the information of packages').set_defaults(func=show_message_for_nnictl_package)
parser_package_subparsers.add_parser('list', help='list installed packages').set_defaults(func=show_message_for_nnictl_package)
#parse tensorboard command
parser_tensorboard = subparsers.add_parser('tensorboard', help='manage tensorboard')
......
......@@ -345,9 +345,9 @@ def log_internal(args, filetype):
'''internal function to call get_log_content'''
file_name = get_config_filename(args)
if filetype == 'stdout':
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stdout')
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'log', 'nnictl_stdout.log')
else:
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'stderr')
file_full_path = os.path.join(NNICTL_HOME_DIR, file_name, 'log', 'nnictl_stderr.log')
print(check_output_command(file_full_path, head=args.head, tail=args.tail))
def log_stdout(args):
......@@ -854,8 +854,9 @@ def save_experiment(args):
except IOError:
print_error('Write file to %s failed!' % os.path.join(temp_nnictl_dir, '.experiment'))
exit(1)
nnictl_config_dir = os.path.join(NNICTL_HOME_DIR, args.id)
shutil.copytree(nnictl_config_dir, os.path.join(temp_nnictl_dir, args.id))
nnictl_log_dir = os.path.join(NNICTL_HOME_DIR, args.id, 'log')
shutil.copytree(nnictl_log_dir, os.path.join(temp_nnictl_dir, args.id, 'log'))
shutil.copy(os.path.join(NNICTL_HOME_DIR, args.id, '.config'), os.path.join(temp_nnictl_dir, args.id, '.config'))
# Step3. Copy code dir
if args.saveCodeDir:
......
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from collections import defaultdict
import json
import pkginfo
import nni
from nni.tools.package_utils import read_installed_package_meta, get_installed_package_meta, \
write_package_meta, get_builtin_algo_meta, get_not_installable_builtin_names, ALGO_TYPES
from .constants import INSTALLABLE_PACKAGE_META
from .common_utils import print_error, print_green
from .command_utils import install_requirements_command, call_pip_install, call_pip_uninstall
PACKAGE_TYPES = ['tuner', 'assessor', 'advisor']
def install_by_name(package_name):
if package_name not in INSTALLABLE_PACKAGE_META:
raise RuntimeError('{} is not found in installable packages!'.format(package_name))
requirements_path = os.path.join(nni.__path__[0], 'algorithms/hpo', INSTALLABLE_PACKAGE_META[package_name]['code_sub_dir'], 'requirements.txt')
assert os.path.exists(requirements_path)
return install_requirements_command(requirements_path)
def package_install(args):
'''install packages'''
installed = False
try:
if args.name:
if install_by_name(args.name) == 0:
package_meta = {}
package_meta['type'] = INSTALLABLE_PACKAGE_META[args.name]['type']
package_meta['name'] = args.name
package_meta['class_name'] = INSTALLABLE_PACKAGE_META[args.name]['class_name']
package_meta['class_args_validator'] = INSTALLABLE_PACKAGE_META[args.name]['class_args_validator']
save_package_meta_data(package_meta)
print_green('{} installed!'.format(args.name))
installed = True
else:
package_meta = get_nni_meta(args.source)
if package_meta:
if call_pip_install(args.source) == 0:
save_package_meta_data(package_meta)
print_green('{} installed!'.format(package_meta['name']))
installed = True
except Exception as e:
print_error(e)
if not installed:
print_error('installation failed!')
def package_uninstall(args):
'''uninstall packages'''
name = args.name[0]
if name in get_not_installable_builtin_names():
print_error('{} cannot be uninstalled!'.format(name))
exit(1)
meta = get_installed_package_meta(None, name)
if meta is None:
print_error('package {} not found!'.format(name))
return
if 'installed_package' in meta:
call_pip_uninstall(meta['installed_package'])
if remove_package_meta_data(name):
print_green('{} uninstalled successfully!'.format(name))
else:
print_error('Failed to uninstall {}!'.format(name))
def package_show(args):
'''show specified packages'''
builtin_name = args.name[0]
meta = get_builtin_algo_meta(builtin_name=builtin_name)
if meta:
print(json.dumps(meta, indent=4))
else:
print_error('package {} not found'.format(builtin_name))
def print_package_list(meta):
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
print('| Name | Type | Installed | Class Name | Module Name |')
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
MAX_MODULE_NAME = 38
for t in ['tuners', 'assessors', 'advisors']:
for p in meta[t]:
module_name = '.'.join(p['class_name'].split('.')[:-1])
if len(module_name) > MAX_MODULE_NAME:
module_name = module_name[:MAX_MODULE_NAME-3] + '...'
class_name = p['class_name'].split('.')[-1]
print('| {:15s} | {:10s} | {:9s} | {:20s} | {:40s} |'.format(p['name'], t, p['installed'], class_name, module_name[:38]))
print('+-----------------+------------+-----------+----------------------+------------------------------------------+')
def package_list(args):
'''list all packages'''
if args.all:
meta = get_builtin_algo_meta()
else:
meta = read_installed_package_meta()
installed_names = defaultdict(list)
for t in ['tuners', 'assessors', 'advisors']:
for p in meta[t]:
p['installed'] = 'Yes'
installed_names[t].append(p['name'])
for k, v in INSTALLABLE_PACKAGE_META.items():
t = v['type']+'s'
if k not in installed_names[t]:
meta[t].append({
'name': k,
'class_name': v['class_name'],
'class_args_validator': v['class_args_validator'],
'installed': 'No'
})
print_package_list(meta)
def save_package_meta_data(meta_data):
assert meta_data['type'] in PACKAGE_TYPES
assert 'name' in meta_data
assert 'class_name' in meta_data
config = read_installed_package_meta()
if meta_data['name'] in [x['name'] for x in config[meta_data['type']+'s']]:
raise ValueError('name %s already installed' % meta_data['name'])
package_meta = {k: meta_data[k] for k in ['name', 'class_name', 'class_args_validator'] if k in meta_data}
if 'package_name' in meta_data:
package_meta['installed_package'] = meta_data['package_name']
config[meta_data['type']+'s'].append(package_meta)
write_package_meta(config)
def remove_package_meta_data(name):
config = read_installed_package_meta()
updated = False
for t in ALGO_TYPES:
for meta in config[t]:
if meta['name'] == name:
config[t].remove(meta)
updated = True
if updated:
write_package_meta(config)
return True
return False
def get_nni_meta(source):
if not os.path.exists(source):
print_error('{} does not exist'.format(source))
return None
if os.path.isdir(source):
if not os.path.exists(os.path.join(source, 'setup.py')):
print_error('setup.py not found')
return None
pkg = pkginfo.Develop(source)
else:
if not source.endswith('.whl'):
print_error('File name {} must end with \'.whl\''.format(source))
return False
pkg = pkginfo.Wheel(source)
classifiers = pkg.classifiers
meta = parse_classifiers(classifiers)
meta['package_name'] = pkg.name
return meta
def parse_classifiers(classifiers):
parts = []
for c in classifiers:
if c.startswith('NNI Package'):
parts = [x.strip() for x in c.split('::')]
break
if len(parts) < 4 or not all(parts):
raise ValueError('Cannot find correct NNI meta data in package classifiers.')
meta = {
'type': parts[1],
'name': parts[2],
'class_name': parts[3]
}
if len(parts) >= 5:
meta['class_args_validator'] = parts[4]
return meta
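An example of what ``parse_classifiers`` extracts, using the demotuner classifier from the setup.py hunk near the top of this commit:

```python
classifiers = [
    'Programming Language :: Python :: 3',
    'NNI Package :: tuner :: demotuner :: demo_tuner.DemoTuner :: demo_tuner.MyClassArgsValidator',
]
meta = parse_classifiers(classifiers)
# meta == {'type': 'tuner', 'name': 'demotuner',
#          'class_name': 'demo_tuner.DemoTuner',
#          'class_args_validator': 'demo_tuner.MyClassArgsValidator'}
```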
......@@ -6,18 +6,13 @@ import importlib
import os
from pathlib import Path
import sys
import ruamel.yaml as yaml
import nni
from .constants import BuiltinAlgorithms
ALGO_TYPES = ['tuners', 'assessors', 'advisors']
def get_all_builtin_names(algo_type):
"""Get all valid builtin names, including:
1. BuiltinAlgorithms which is pre-installed.
2. User installed packages in <nni_installation_path>/config/installed_packages.yml
"""Get all builtin names of registered algorithms of specified type
Parameters
----------
......@@ -30,109 +25,33 @@ def get_all_builtin_names(algo_type):
all builtin tuner names.
"""
assert algo_type in ALGO_TYPES
merged_dict = _get_merged_builtin_dict()
builtin_names = [x['name'] for x in merged_dict[algo_type]]
return builtin_names
def get_not_installable_builtin_names(algo_type=None):
"""Get builtin names in BuiltinAlgorithms which do not need to be installed
and can be used once NNI is installed.
return [x['builtinName'] for x in read_registerd_algo_meta()[algo_type]]
Parameters
----------
algo_type: str | None
can be one of 'tuners', 'assessors', 'advisors' or None
Returns: list of string
-------
All builtin names of specified type, for example, if algo_type is 'tuners', returns
all builtin tuner names.
If algo_type is None, returns all builtin names of all types.
"""
if algo_type is None:
meta = BuiltinAlgorithms
else:
assert algo_type in ALGO_TYPES
meta = {
algo_type: BuiltinAlgorithms[algo_type]
}
names = []
for t in ALGO_TYPES:
if t in meta:
names.extend([x['name'] for x in meta[t]])
return names
def get_builtin_algo_meta(algo_type=None, builtin_name=None):
""" Get meta information of builtin algorithms from:
1. Pre-installed BuiltinAlgorithms
2. User installed packages in <nni_installation_path>/config/installed_packages.yml
def get_registered_algo_meta(builtin_name, algo_type=None):
""" Get meta information of registered algorithms.
Parameters
----------
algo_type: str | None
can be one of 'tuners', 'assessors', 'advisors' or None
builtin_name: str | None
builtin_name: str
builtin name.
Returns: dict | list of dict | None
-------
If builtin_name is specified, returns meta information of the specified builtin
algorithms, for example:
{
'name': 'Random',
'class_name': 'nni.hyperopt_tuner.hyperopt_tuner.HyperoptTuner',
'class_args': {
'algorithm_name': 'random_search'
},
'accept_class_args': False,
'class_args_validator': 'nni.hyperopt_tuner.hyperopt_tuner.HyperoptClassArgsValidator'
}
If builtin_name is None, returns multiple meta information in a list.
"""
merged_dict = _get_merged_builtin_dict()
if algo_type is None and builtin_name is None:
return merged_dict
if algo_type:
assert algo_type in ALGO_TYPES
metas = merged_dict[algo_type]
else:
metas = merged_dict['tuners'] + merged_dict['assessors'] + merged_dict['advisors']
if builtin_name:
for m in metas:
if m['name'] == builtin_name:
return m
else:
return metas
return None
def get_installed_package_meta(algo_type, builtin_name):
""" Get meta information of user installed algorithms from:
<nni_installation_path>/config/installed_packages.yml
Parameters
----------
algo_type: str | None
can be one of 'tuners', 'assessors', 'advisors' or None
builtin_name: str
builtin name.
Returns: dict | None
-------
Returns meta information of the specified builtin algorithms, for example:
{
'class_args_validator': 'nni.smac_tuner.smac_tuner.SMACClassArgsValidator',
'class_name': 'nni.smac_tuner.smac_tuner.SMACTuner',
'name': 'SMAC'
'classArgsValidator': 'nni.smac_tuner.smac_tuner.SMACClassArgsValidator',
'className': 'nni.smac_tuner.smac_tuner.SMACTuner',
'builtinName': 'SMAC'
}
"""
assert builtin_name is not None
if algo_type:
assert algo_type in ALGO_TYPES
config = read_installed_package_meta()
config = read_registerd_algo_meta()
candidates = []
if algo_type:
......@@ -141,11 +60,11 @@ def get_installed_package_meta(algo_type, builtin_name):
for algo_type in ALGO_TYPES:
candidates.extend(config[algo_type])
for meta in candidates:
if meta['name'] == builtin_name:
if meta['builtinName'] == builtin_name:
return meta
return None
def _parse_full_class_name(full_class_name):
def parse_full_class_name(full_class_name):
if not full_class_name:
return None, None
parts = full_class_name.split('.')
......@@ -168,10 +87,10 @@ def get_builtin_module_class_name(algo_type, builtin_name):
"""
assert algo_type in ALGO_TYPES
assert builtin_name is not None
meta = get_builtin_algo_meta(algo_type, builtin_name)
meta = get_registered_algo_meta(builtin_name, algo_type)
if not meta:
return None, None
return _parse_full_class_name(meta['class_name'])
return parse_full_class_name(meta['className'])
def create_validator_instance(algo_type, builtin_name):
"""Create instance of validator class
......@@ -190,10 +109,10 @@ def create_validator_instance(algo_type, builtin_name):
"""
assert algo_type in ALGO_TYPES
assert builtin_name is not None
meta = get_builtin_algo_meta(algo_type, builtin_name)
if not meta or 'class_args_validator' not in meta:
meta = get_registered_algo_meta(builtin_name, algo_type)
if not meta or 'classArgsValidator' not in meta:
return None
module_name, class_name = _parse_full_class_name(meta['class_args_validator'])
module_name, class_name = parse_full_class_name(meta['classArgsValidator'])
class_module = importlib.import_module(module_name)
class_constructor = getattr(class_module, class_name)
......@@ -229,17 +148,17 @@ def create_builtin_class_instance(builtin_name, input_class_args, algo_type):
2. merge user specified class args together with builtin class args.
"""
assert algo_meta
module_name, class_name = _parse_full_class_name(algo_meta['class_name'])
module_name, class_name = parse_full_class_name(algo_meta['className'])
class_args = {}
if 'class_args' in algo_meta:
class_args = algo_meta['class_args']
if 'classArgs' in algo_meta:
class_args = algo_meta['classArgs']
if input_class_args is not None:
class_args.update(input_class_args)
return module_name, class_name, class_args
algo_meta = get_builtin_algo_meta(algo_type, builtin_name)
algo_meta = get_registered_algo_meta(builtin_name, algo_type)
module_name, class_name, class_args = parse_algo_meta(algo_meta, input_class_args)
if importlib.util.find_spec(module_name) is None:
......@@ -287,15 +206,26 @@ def create_customized_class_instance(class_params):
return instance
def get_package_config_path():
# FIXME: this might not be the desired location
config_dir = Path(nni.__path__[0]).parent / 'nni_config'
if not os.path.exists(config_dir):
os.makedirs(config_dir, exist_ok=True)
return os.path.join(config_dir, 'installed_packages.yml')
def _using_conda_or_virtual_environment():
return sys.prefix != sys.base_prefix or os.path.isdir(os.path.join(sys.prefix, 'conda-meta'))
def read_installed_package_meta():
config_file = get_package_config_path()
def get_registered_algo_config_path():
# Find the path of registered_algorithms.yml for this NNI installation.
# The file is copied into this location by setup.py, so we must use the
# same logic as setup.py to locate it.
if _using_conda_or_virtual_environment():
nni_config_dir = os.path.join(sys.prefix, 'nni')
elif sys.platform == 'win32':
nni_config_dir = os.path.join(os.getenv('APPDATA'), 'nni')
else:
nni_config_dir = os.path.expanduser('~/.config/nni')
if not os.path.exists(nni_config_dir):
os.makedirs(nni_config_dir, exist_ok=True)
return os.path.join(nni_config_dir, 'registered_algorithms.yml')
def read_registerd_algo_meta():
config_file = get_registered_algo_config_path()
if os.path.exists(config_file):
with open(config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
......@@ -306,16 +236,7 @@ def read_installed_package_meta():
config[t] = []
return config
def write_package_meta(config):
config_file = get_package_config_path()
def write_registered_algo_meta(config):
config_file = get_registered_algo_config_path()
with open(config_file, 'w') as f:
f.write(yaml.dump(dict(config), default_flow_style=False))
def _get_merged_builtin_dict():
def merge_meta_dict(d1, d2):
res = defaultdict(list)
for t in ALGO_TYPES:
res[t] = d1[t] + d2[t]
return res
return merge_meta_dict(BuiltinAlgorithms, read_installed_package_meta())
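For reference, a sketch of where the registry file lands under the three branches of ``get_registered_algo_config_path`` above, plus a lookup via the new API (``demotuner`` is the hypothetical example from this commit):

```python
# virtualenv / conda:  <sys.prefix>/nni/registered_algorithms.yml
# Windows:             %APPDATA%\nni\registered_algorithms.yml
# Linux / macOS:       ~/.config/nni/registered_algorithms.yml
from nni.tools.package_utils import get_registered_algo_meta

meta = get_registered_algo_meta('demotuner')
if meta is not None:
    print(meta['className'])  # 'demo_tuner.DemoTuner' once registered
```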
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
BuiltinAlgorithms = {
'tuners': [
{
'name': 'TPE',
'class_name': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptTuner',
'class_args': {
'algorithm_name': 'tpe'
},
'class_args_validator': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptClassArgsValidator'
},
{
'name': 'Random',
'class_name': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptTuner',
'class_args': {
'algorithm_name': 'random_search'
},
'accept_class_args': False,
'class_args_validator': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptClassArgsValidator'
},
{
'name': 'Anneal',
'class_name': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptTuner',
'class_args': {
'algorithm_name': 'anneal'
},
'class_args_validator': 'nni.algorithms.hpo.hyperopt_tuner.hyperopt_tuner.HyperoptClassArgsValidator'
},
{
'name': 'Evolution',
'class_name': 'nni.algorithms.hpo.evolution_tuner.evolution_tuner.EvolutionTuner',
'class_args_validator': 'nni.algorithms.hpo.evolution_tuner.evolution_tuner.EvolutionClassArgsValidator'
},
{
'name': 'BatchTuner',
'class_name': 'nni.algorithms.hpo.batch_tuner.batch_tuner.BatchTuner',
'accept_class_args': False,
},
{
'name': 'GridSearch',
'class_name': 'nni.algorithms.hpo.gridsearch_tuner.gridsearch_tuner.GridSearchTuner',
'accept_class_args': False,
},
{
'name': 'NetworkMorphism',
'class_name': 'nni.algorithms.hpo.networkmorphism_tuner.networkmorphism_tuner.NetworkMorphismTuner',
'class_args_validator': 'nni.algorithms.hpo.networkmorphism_tuner.networkmorphism_tuner.NetworkMorphismClassArgsValidator'
},
{
'name': 'MetisTuner',
'class_name': 'nni.algorithms.hpo.metis_tuner.metis_tuner.MetisTuner',
'class_args_validator': 'nni.algorithms.hpo.metis_tuner.metis_tuner.MetisClassArgsValidator'
},
{
'name': 'GPTuner',
'class_name': 'nni.algorithms.hpo.gp_tuner.gp_tuner.GPTuner',
'class_args_validator': 'nni.algorithms.hpo.gp_tuner.gp_tuner.GPClassArgsValidator'
},
{
'name': 'PBTTuner',
'class_name': 'nni.algorithms.hpo.pbt_tuner.pbt_tuner.PBTTuner',
'class_args_validator': 'nni.algorithms.hpo.pbt_tuner.pbt_tuner.PBTClassArgsValidator'
},
{
'name': 'RegularizedEvolutionTuner',
'class_name': 'nni.algorithms.hpo.regularized_evolution_tuner.regularized_evolution_tuner.RegularizedEvolutionTuner',
'class_args_validator': 'nni.algorithms.hpo.regularized_evolution_tuner.regularized_evolution_tuner.EvolutionClassArgsValidator'
}
],
'assessors': [
{
'name': 'Medianstop',
'class_name': 'nni.algorithms.hpo.medianstop_assessor.medianstop_assessor.MedianstopAssessor',
'class_args_validator': 'nni.algorithms.hpo.medianstop_assessor.medianstop_assessor.MedianstopClassArgsValidator'
},
{
'name': 'Curvefitting',
'class_name': 'nni.algorithms.hpo.curvefitting_assessor.curvefitting_assessor.CurvefittingAssessor',
'class_args_validator': 'nni.algorithms.hpo.curvefitting_assessor.curvefitting_assessor.CurvefittingClassArgsValidator'
},
],
'advisors': [
{
'name': 'Hyperband',
'class_name': 'nni.algorithms.hpo.hyperband_advisor.hyperband_advisor.Hyperband',
'class_args_validator': 'nni.algorithms.hpo.hyperband_advisor.hyperband_advisor.HyperbandClassArgsValidator'
}
]
}
......@@ -109,6 +109,7 @@ def extract_scalar_history(trial_history, scalar_key='default'):
def convert_dict2tuple(value):
"""
Convert a dict into a tuple so it becomes hashable.
NOTE: this function mutates the original data in place.
"""
if isinstance(value, dict):
for _keys in value:
......
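If the in-place mutation ever becomes a problem beyond the deepcopy workaround in the grid search tuner above, a side-effect-free variant is straightforward (a hypothetical alternative, not part of this commit):

```python
def convert_dict2tuple_pure(value):
    """Build the hashable tuple without touching the input,
    which would make the deepcopy calls in the tuner unnecessary."""
    if isinstance(value, dict):
        return tuple(sorted((k, convert_dict2tuple_pure(v)) for k, v in value.items()))
    return value
```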
......@@ -2,25 +2,30 @@
# so that a bug in any module will cause at least one platform to fail quickly.
jobs:
- job: 'ubuntu_latest'
- job: ubuntu_latest
pool:
# FIXME: In ubuntu-20.04 Python interpreter crashed during SMAC UT
vmImage: 'ubuntu-18.04'
vmImage: ubuntu-18.04
# This platform tests lint and doc first.
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.6
displayName: Configure Python version
- script: |
set -e
python3 -m pip install -U --upgrade pip setuptools
python3 -m pip install -U pytest coverage
python3 -m pip install -U pylint flake8
python3 -m pip install --upgrade pip setuptools
python3 -m pip install pytest coverage
python3 -m pip install pylint flake8
echo "##vso[task.setvariable variable=PATH]${HOME}/.local/bin:${PATH}"
displayName: 'Install Python tools'
displayName: Install Python tools
- script: |
python3 setup.py develop
displayName: 'Install NNI'
displayName: Install NNI
- script: |
set -e
......@@ -28,21 +33,19 @@ jobs:
yarn eslint
cd ../webui
yarn eslint
displayName: 'ESLint'
displayName: ESLint
- script: |
set -e
sudo apt-get install -y pandoc
python3 -m pip install -U --upgrade pygments
python3 -m pip install -U torch==1.7.0+cpu torchvision==0.8.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install -U tensorflow==2.3.1
python3 -m pip install -U keras==2.4.2
python3 -m pip install -U gym onnx peewee thop
python3 -m pip install -U sphinx==1.8.3 sphinx-argparse==0.2.5 sphinx-markdown-tables==0.0.9 sphinx-rtd-theme==0.4.2 sphinxcontrib-websupport==1.1.0 recommonmark==0.5.0 nbsphinx
python3 -m pip install --upgrade pygments
python3 -m pip install --upgrade 'torch>=1.7.0+cpu' 'torchvision>=0.8.1+cpu' -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install --upgrade tensorflow
python3 -m pip install --upgrade gym onnx peewee thop
python3 -m pip install sphinx==1.8.3 sphinx-argparse==0.2.5 sphinx-markdown-tables==0.0.9 sphinx-rtd-theme==0.4.2 sphinxcontrib-websupport==1.1.0 recommonmark==0.5.0 nbsphinx
sudo apt-get install swig -y
nnictl package install --name=SMAC
nnictl package install --name=BOHB
displayName: 'Install extra dependencies'
python3 -m pip install -e .[SMAC,BOHB]
displayName: Install extra dependencies
- script: |
set -e
......@@ -50,17 +53,17 @@ jobs:
python3 -m flake8 nni --count --select=E9,F63,F72,F82 --show-source --statistics
EXCLUDES=examples/trials/mnist-nas/*/mnist*.py,examples/trials/nas_cifar10/src/cifar10/general_child.py
python3 -m flake8 examples --count --exclude=$EXCLUDES --select=E9,F63,F72,F82 --show-source --statistics
displayName: 'pylint and flake8'
displayName: pylint and flake8
- script: |
cd docs/en_US
sphinx-build -M html . _build -W --keep-going -T
displayName: 'Check Sphinx documentation'
displayName: Check Sphinx documentation
- script: |
cd test
python3 -m pytest ut
displayName: 'Python unit test'
displayName: Python unit test
- script: |
set -e
......@@ -68,52 +71,56 @@ jobs:
yarn test
cd ../nasui
CI=true yarn test
displayName: 'TypeScript unit test'
displayName: TypeScript unit test
- script: |
cd test
python3 nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: 'Simple integration test'
displayName: Simple integration test
- job: 'ubuntu_legacy'
- job: ubuntu_legacy
pool:
vmImage: 'ubuntu-18.04'
vmImage: ubuntu-18.04
# This platform runs integration test first.
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.6
displayName: Configure Python version
- script: |
set -e
python3 -m pip install -U --upgrade pip setuptools
python3 -m pip install -U pytest coverage
python -m pip install --upgrade pip setuptools
python -m pip install pytest coverage
echo "##vso[task.setvariable variable=PATH]${HOME}/.local/bin:${PATH}"
displayName: 'Install Python tools'
displayName: Install Python tools
- script: |
python3 setup.py develop
displayName: 'Install NNI'
python setup.py develop
displayName: Install NNI
- script: |
set -e
python3 -m pip install -U torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install -U tensorflow==1.15.2
python3 -m pip install -U keras==2.1.6
python3 -m pip install -U gym onnx peewee
python -m pip install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install tensorflow==1.15.4
python -m pip install keras==2.1.6
python -m pip install gym onnx peewee
sudo apt-get install swig -y
nnictl package install --name=SMAC
nnictl package install --name=BOHB
displayName: 'Install extra dependencies'
python -m pip install -e .[SMAC,BOHB]
displayName: Install extra dependencies
- script: |
cd test
python3 nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: 'Simple integration test'
python nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: Simple integration test
- script: |
cd test
python3 -m pytest ut
displayName: 'Python unit test'
python -m pytest ut
displayName: Python unit test
- script: |
set -e
......@@ -121,12 +128,12 @@ jobs:
yarn test
cd ../nasui
CI=true yarn test
displayName: 'TypeScript unit test'
displayName: TypeScript unit test
- job: 'macos'
- job: macos
pool:
vmImage: 'macOS-10.15'
vmImage: macOS-10.15
# This platform runs TypeScript unit test first.
......@@ -134,86 +141,91 @@ jobs:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.8
displayName: Configure Python
displayName: Configure Python version
- script: |
set -e
echo "##vso[task.setvariable variable=PATH]${PATH}:${HOME}/.local/bin"
python -m pip install -U --upgrade pip setuptools wheel
python -m pip install -U pytest coverage
displayName: 'Install Python tools'
python -m pip install --upgrade pip setuptools wheel
python -m pip install pytest coverage
displayName: Install Python tools
- script: |
python3 setup.py develop
displayName: 'Install NNI'
python setup.py develop
displayName: Install NNI
- script: |
set -e
export CI=true
(cd ts/nni_manager && yarn test)
(cd ts/nasui && yarn test)
displayName: 'TypeScript unit test'
displayName: TypeScript unit test
- script: |
set -e
# pytorch Mac binary does not support CUDA, default is cpu version
python3 -m pip install -U torchvision==0.6.0 torch==1.5.0
python3 -m pip install -U tensorflow==2.3.1
python -m pip install torchvision==0.6.0 torch==1.5.0
python -m pip install tensorflow==2.3.1
brew install swig@3
rm -f /usr/local/bin/swig
ln -s /usr/local/opt/swig\@3/bin/swig /usr/local/bin/swig
nnictl package install --name=SMAC
displayName: 'Install extra dependencies'
python -m pip install -e .[SMAC]
displayName: Install extra dependencies
- script: |
cd test
python3 -m pytest ut
displayName: 'Python unit test'
python -m pytest ut
displayName: Python unit test
- script: |
cd test
python3 nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: 'Simple integration test'
python nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: Simple integration test
# FIXME: Windows UT is still under debugging
- job: 'windows'
- job: windows
pool:
vmImage: 'windows-2019'
vmImage: windows-2019
# This platform runs Python unit test first.
steps:
- task: UsePythonVersion@0
inputs:
versionSpec: 3.8
displayName: Configure Python version
- script: |
python -m pip install -U --upgrade pip setuptools
python -m pip install -U pytest coverage
displayName: 'Install Python tools'
python -m pip install --upgrade pip setuptools
python -m pip install pytest coverage
displayName: Install Python tools
- script: |
python setup.py develop --no-user
displayName: 'Install NNI'
displayName: Install NNI
- script: |
python -m pip install -U scikit-learn==0.23.2
python -m pip install -U torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install -U tensorflow==2.3.1
displayName: 'Install extra dependencies'
python -m pip install scikit-learn==0.23.2
python -m pip install torch==1.5.0+cpu torchvision==0.6.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
python -m pip install tensorflow==2.3.1
displayName: Install extra dependencies
- script: |
cd test
python -m pytest ut
displayName: 'Python unit test'
displayName: Python unit test
- script: |
cd ts/nni_manager
yarn test
displayName: 'TypeScript unit test'
displayName: TypeScript unit test
- script: |
cd test
python nni_test/nnitest/run_tests.py --config config/pr_tests.yml
displayName: 'Simple integration test'
displayName: Simple integration test
trigger:
......