Unverified Commit 7c4b8c0d authored by liuzhe-lz, committed by GitHub

Make pylint happy (#1649)

Update python sdk and nni_annotation to pass pylint rules
parent 22316800
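Most of the diff below is a handful of recurring pylint fixes: lazy logging interpolation, getattr instead of eval, dict comprehensions, and renaming ad-hoc globals. As an illustrative sketch only (not part of the original commit; the logger name and values are hypothetical), this is the before/after shape of the lazy-interpolation change that pylint's logging-not-lazy and logging-format-interpolation checks ask for:

import logging

logger = logging.getLogger("example")   # hypothetical logger, for illustration only
count, total = 3, 10

# Flagged by pylint: the message is formatted eagerly, even when the
# log level would discard the record.
logger.info("Importing data, current processing progress %s / %s" % (count, total))

# What this commit changes such calls to: pass the arguments separately,
# so formatting is deferred until the record is actually emitted.
logger.info("Importing data, current processing progress %s / %s", count, total)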
......@@ -139,7 +139,7 @@ class TargetSpace():
except AssertionError:
raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(self.dim())
"expected number of parameters ({}).".format(self.dim)
)
params = {}
......@@ -201,7 +201,7 @@ class TargetSpace():
elif _bound['_type'] == 'qloguniform':
params[col] = parameter_expressions.qloguniform(
_bound['_value'][0], _bound['_value'][1], _bound['_value'][2], self.random_state)
return params
def max(self):
......
......@@ -37,8 +37,8 @@ def _match_val_type(vals, bounds):
_type = bound['_type']
if _type == "choice":
# Find the closest integer in the array, vals_bounds
vals_new.append(
min(bound['_value'], key=lambda x: abs(x - vals[i])))
# pylint: disable=cell-var-from-loop
vals_new.append(min(bound['_value'], key=lambda x: abs(x - vals[i])))
elif _type in ['quniform', 'randint']:
vals_new.append(np.around(vals[i]))
else:
......
......@@ -23,8 +23,8 @@ gridsearch_tuner.py including:
'''
import copy
import numpy as np
import logging
import numpy as np
import nni
from nni.tuner import Tuner
......@@ -44,7 +44,8 @@ class GridSearchTuner(Tuner):
Type 'choice' will select one of the options. Note that it can also be nested.
Type 'quniform' will receive three values [low, high, q], where [low, high] specifies a range and 'q' specifies the interval
It will be sampled in a way that the first sampled value is 'low', and each of the following values is 'interval' larger than the value in front of it.
It will be sampled in a way that the first sampled value is 'low',
and each of the following values is 'interval' larger than the value in front of it.
Type 'randint' gives all possible integers in range [low, high). Note that 'high' is not included.
'''
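As an illustrative aside (not part of the commit), a search space covering the three types described in the docstring above might look like the following; the parameter names are hypothetical, and the {"_type", "_value"} layout follows the bound-handling code elsewhere in this diff:

# Hypothetical search space for GridSearchTuner, matching the docstring above.
search_space = {
    "optimizer": {"_type": "choice", "_value": ["sgd", "adam"]},    # pick one option
    "batch_size": {"_type": "quniform", "_value": [16, 64, 16]},    # 16, 32, 48, 64
    "num_layers": {"_type": "randint", "_value": [2, 5]},           # 2, 3, 4 (5 excluded)
}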
......@@ -132,7 +133,7 @@ class GridSearchTuner(Tuner):
def generate_parameters(self, parameter_id, **kwargs):
self.count += 1
while (self.count <= len(self.expanded_search_space)-1):
while self.count <= len(self.expanded_search_space) - 1:
_params_tuple = convert_dict2tuple(self.expanded_search_space[self.count])
if _params_tuple in self.supplement_data:
self.count += 1
......@@ -153,14 +154,14 @@ class GridSearchTuner(Tuner):
"""
_completed_num = 0
for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data)))
logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
_completed_num += 1
assert "parameter" in trial_info
_params = trial_info["parameter"]
assert "value" in trial_info
_value = trial_info['value']
if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value)
logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue
_params_tuple = convert_dict2tuple(_params)
self.supplement_data[_params_tuple] = True
......
......@@ -32,7 +32,7 @@ from nni.common import multi_phase_enabled
from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.protocol import CommandType, send
from nni.utils import NodeType, OptimizeMode, MetricType, extract_scalar_reward
import nni.parameter_expressions as parameter_expressions
from nni import parameter_expressions
_logger = logging.getLogger(__name__)
......@@ -49,7 +49,7 @@ def create_parameter_id():
int
parameter id
"""
global _next_parameter_id # pylint: disable=global-statement
global _next_parameter_id
_next_parameter_id += 1
return _next_parameter_id - 1
......@@ -102,8 +102,7 @@ def json2parameter(ss_spec, random_state):
_index = random_state.randint(len(_value))
chosen_params = json2parameter(ss_spec[NodeType.VALUE][_index], random_state)
else:
chosen_params = eval('parameter_expressions.' + # pylint: disable=eval-used
_type)(*(_value + [random_state]))
chosen_params = getattr(parameter_expressions, _type)(*(_value + [random_state]))
else:
chosen_params = dict()
for key in ss_spec.keys():
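The eval-to-getattr change a few lines above is one of the recurring eval-used fixes in this commit; a minimal, self-contained sketch of the same dispatch-by-name pattern, using the math module purely for illustration:

import math

func_name = "sqrt"
# Equivalent to eval("math." + func_name)(2.0), but without pylint's
# eval-used warning and without executing an arbitrary string as code.
value = getattr(math, func_name)(2.0)
print(value)   # 1.4142...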
......@@ -140,8 +139,8 @@ class Bracket():
self.bracket_id = s
self.s_max = s_max
self.eta = eta
self.n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon) # pylint: disable=invalid-name
self.r = R / eta ** s # pylint: disable=invalid-name
self.n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon)
self.r = R / eta ** s
self.i = 0
self.hyper_configs = [] # [ {id: params}, {}, ... ]
self.configs_perf = [] # [ {id: [seq, acc]}, {}, ... ]
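A rough, illustrative sanity check of the two formulas just above (not part of the commit): assuming the default R=60 and eta=3 used later in this file, and the usual Hyperband choice s_max = floor(log_eta(R)) = 3, the brackets come out as follows.

import math

R, eta, s_max, _epsilon = 60, 3, 3, 1e-6   # assumed defaults; s_max = floor(log_3(60)) = 3
for s in range(s_max, -1, -1):
    n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon)   # configs in the bracket
    r = R / eta ** s                                               # initial budget per config
    print(f"s={s}  n={n}  r={r:.2f}")
# s=3  n=27  r=2.22    s=2  n=12  r=6.67    s=1  n=6  r=20.00    s=0  n=4  r=60.00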
......@@ -197,7 +196,7 @@ class Bracket():
i: int
the ith round
"""
global _KEY # pylint: disable=global-statement
global _KEY
self.num_finished_configs[i] += 1
_logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d', self.bracket_id, self.i, i,
self.num_finished_configs[i], self.num_configs_to_run[i])
......@@ -226,7 +225,7 @@ class Bracket():
return [[key, value] for key, value in hyper_configs.items()]
return None
def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state): # pylint: disable=invalid-name
def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state):
"""Randomly generate num hyperparameter configurations from search space
Parameters
......@@ -239,7 +238,7 @@ class Bracket():
list
a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
"""
global _KEY # pylint: disable=global-statement
global _KEY
assert self.i == 0
hyperparameter_configs = dict()
for _ in range(num):
......@@ -285,7 +284,7 @@ class Hyperband(MsgDispatcherBase):
def __init__(self, R=60, eta=3, optimize_mode='maximize'):
"""B = (s_max + 1)R"""
super(Hyperband, self).__init__()
self.R = R # pylint: disable=invalid-name
self.R = R
self.eta = eta
self.brackets = dict() # dict of Bracket
self.generated_hyper_configs = [] # all the configs waiting for run
......@@ -415,7 +414,7 @@ class Hyperband(MsgDispatcherBase):
bracket_id, i, _ = data['parameter_id'].split('_')
bracket_id = int(bracket_id)
# add <trial_job_id, parameter_id> to self.job_id_para_id_map here,
# add <trial_job_id, parameter_id> to self.job_id_para_id_map here,
# because when the first parameter_id is created, trial_job_id is not known yet.
if data['trial_job_id'] in self.job_id_para_id_map:
assert self.job_id_para_id_map[data['trial_job_id']] == data['parameter_id']
......
......@@ -51,13 +51,13 @@ def json2space(in_x, name=NodeType.ROOT):
name = name + '-' + _type
_value = json2space(in_x[NodeType.VALUE], name=name)
if _type == 'choice':
out_y = eval('hp.hp.choice')(name, _value)
out_y = hp.hp.choice(name, _value)
elif _type == 'randint':
out_y = hp.hp.randint(name, _value[1] - _value[0])
else:
if _type in ['loguniform', 'qloguniform']:
_value[:2] = np.log(_value[:2])
out_y = eval('hp.hp.' + _type)(name, *_value)
out_y = getattr(hp.hp, _type)(name, *_value)
else:
out_y = dict()
for key in in_x.keys():
......@@ -191,6 +191,7 @@ def _add_index(in_x, parameter):
return {NodeType.INDEX: pos, NodeType.VALUE: item}
else:
return parameter
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
class HyperoptTuner(Tuner):
......@@ -198,7 +199,7 @@ class HyperoptTuner(Tuner):
HyperoptTuner is a tuner which uses the hyperopt algorithm.
"""
def __init__(self, algorithm_name, optimize_mode='minimize',
def __init__(self, algorithm_name, optimize_mode='minimize',
parallel_optimize=False, constant_liar_type='min'):
"""
Parameters
......@@ -206,7 +207,7 @@ class HyperoptTuner(Tuner):
algorithm_name : str
algorithm_name includes "tpe", "random_search" and "anneal".
optimize_mode : str
parallel_optimize : bool
parallel_optimize : bool
More detail could reference: docs/en_US/Tuner/HyperoptTuner.md
constant_liar_type : str
constant_liar_type including "min", "max" and "mean"
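As a usage sketch of the constructor documented above (not part of the commit; the argument values are examples only):

# Hypothetical instantiation; parameter meanings follow the docstring above.
tuner = HyperoptTuner(
    algorithm_name="tpe",          # one of "tpe", "random_search", "anneal"
    optimize_mode="maximize",
    parallel_optimize=True,        # enable the constant-liar parallel strategy
    constant_liar_type="mean",     # one of "min", "max", "mean"
)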
......@@ -290,7 +291,7 @@ class HyperoptTuner(Tuner):
if self.parallel:
self.running_data.append(parameter_id)
params = split_index(total_params)
return params
......@@ -409,8 +410,8 @@ class HyperoptTuner(Tuner):
misc_by_id = {m['tid']: m for m in miscs}
for m in miscs:
m['idxs'] = dict([(key, []) for key in idxs])
m['vals'] = dict([(key, []) for key in idxs])
m['idxs'] = {key: [] for key in idxs}
m['vals'] = {key: [] for key in idxs}
for key in idxs:
assert len(idxs[key]) == len(vals[key])
......@@ -433,7 +434,7 @@ class HyperoptTuner(Tuner):
total_params : dict
parameter suggestion
"""
if self.parallel and len(self.total_data)>20 and len(self.running_data) and self.optimal_y is not None:
if self.parallel and len(self.total_data) > 20 and self.running_data and self.optimal_y is not None:
self.CL_rval = copy.deepcopy(self.rval)
if self.constant_liar_type == 'mean':
_constant_liar_y = self.optimal_y[0] / self.optimal_y[1]
......@@ -447,7 +448,7 @@ class HyperoptTuner(Tuner):
else:
rval = self.rval
random_state = rval.rstate.randint(2**31 - 1)
trials = rval.trials
algorithm = rval.algo
new_ids = rval.trials.new_trial_ids(1)
......@@ -481,8 +482,7 @@ class HyperoptTuner(Tuner):
"""
_completed_num = 0
for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %
(_completed_num, len(data)))
logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
_completed_num += 1
if self.algorithm_name == 'random_search':
return
......@@ -491,9 +491,7 @@ class HyperoptTuner(Tuner):
assert "value" in trial_info
_value = trial_info['value']
if not _value:
logger.info(
"Useless trial data, value is %s, skip this trial data." %
_value)
logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue
self.supplement_data_num += 1
_parameter_id = '_'.join(
......
......@@ -42,7 +42,7 @@ class MedianstopAssessor(Assessor):
self.high_better = False
else:
self.high_better = True
logger.warning('unrecognized optimize_mode', optimize_mode)
logger.warning('unrecognized optimize_mode %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history):
"""update data
......@@ -121,10 +121,10 @@ class MedianstopAssessor(Assessor):
best_history = min(trial_history)
avg_array = []
for id in self.completed_avg_history:
if len(self.completed_avg_history[id]) >= curr_step:
avg_array.append(self.completed_avg_history[id][curr_step - 1])
if len(avg_array) > 0:
for id_ in self.completed_avg_history:
if len(self.completed_avg_history[id_]) >= curr_step:
avg_array.append(self.completed_avg_history[id_][curr_step - 1])
if avg_array:
avg_array.sort()
if self.high_better:
median = avg_array[(len(avg_array)-1) // 2]
......
......@@ -22,7 +22,6 @@ import random
from .medianstop_assessor import MedianstopAssessor
from nni.assessor import AssessResult
logger = logging.getLogger('nni.contrib.medianstop_assessor')
logger.debug('START')
......@@ -66,4 +65,4 @@ try:
test()
except Exception as exception:
logger.exception(exception)
raise
\ No newline at end of file
raise
......@@ -24,7 +24,6 @@ import sys
import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.lib_data as lib_data
sys.path.insert(1, os.path.join(sys.path[0], '..'))
......@@ -52,13 +51,13 @@ def selection_r(x_bounds,
Select using different types.
'''
minimize_starting_points = clusteringmodel_gmm_good.sample(n_samples=num_starting_points)
outputs = selection(x_bounds, x_types,
clusteringmodel_gmm_good,
clusteringmodel_gmm_bad,
minimize_starting_points[0],
minimize_constraints_fun)
return outputs
def selection(x_bounds,
......
......@@ -19,12 +19,12 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse, json, os, sys
import os
import sys
from multiprocessing.dummy import Pool as ThreadPool
import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
import nni.metis_tuner.lib_data as lib_data
sys.path.insert(1, os.path.join(sys.path[0], '..'))
......@@ -71,14 +71,15 @@ def outlierDetection_threaded(samples_x, samples_y_aggregation):
else:
print("error here.")
outliers = None if len(outliers) == 0 else outliers
outliers = outliers if outliers else None
return outliers
def outlierDetection(samples_x, samples_y_aggregation):
'''
TODO
'''
outliers = []
for samples_idx in range(0, len(samples_x)):
for samples_idx, _ in enumerate(samples_x):
#sys.stderr.write("[%s] DEBUG: Evaluating %d of %d samples\n"
# \ % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
diagnostic_regressor_gp = gp_create_model.create_model(\
......@@ -93,5 +94,5 @@ def outlierDetection(samples_x, samples_y_aggregation):
"expected_sigma": sigma,
"difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)})
outliers = None if len(outliers) == 0 else outliers
outliers = outliers if outliers else None
return outliers
......@@ -31,6 +31,7 @@ def match_val_type(vals, vals_bounds, vals_types):
for i, _ in enumerate(vals_types):
if vals_types[i] == "discrete_int":
# Find the closest integer in the array, vals_bounds
# pylint: disable=cell-var-from-loop
vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i])))
elif vals_types[i] == "range_int":
# Round down to the nearest integer
......@@ -64,4 +65,3 @@ def rand(x_bounds, x_types):
return None
return outputs
\ No newline at end of file
......@@ -20,14 +20,11 @@
import copy
import logging
import numpy as np
import os
import random
import statistics
import sys
import warnings
from enum import Enum, unique
from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.lib_data as lib_data
......@@ -99,6 +96,8 @@ class MetisTuner(Tuner):
self.minimize_constraints_fun = None
self.minimize_starting_points = None
self.supplement_data_num = 0
self.x_bounds = []
self.x_types = []
def update_search_space(self, search_space):
......@@ -144,7 +143,7 @@ class MetisTuner(Tuner):
self.x_types[idx] = 'discrete_int'
else:
logger.info("Metis Tuner doesn't support this kind of variable: " + str(key_type))
logger.info("Metis Tuner doesn't support this kind of variable: %s", key_type)
raise RuntimeError("Metis Tuner doesn't support this kind of variable: " + str(key_type))
else:
logger.info("The format of search space is not a dict.")
......@@ -198,7 +197,7 @@ class MetisTuner(Tuner):
minimize_starting_points=self.minimize_starting_points,
minimize_constraints_fun=self.minimize_constraints_fun)
logger.info("Generate paramageters:\n" + str(results))
logger.info("Generate paramageters:\n%s", results)
return results
......@@ -217,8 +216,8 @@ class MetisTuner(Tuner):
value = -value
logger.info("Received trial result.")
logger.info("value is :" + str(value))
logger.info("parameter is : " + str(parameters))
logger.info("value is :%s", value)
logger.info("parameter is : %s", parameters)
# parse parameter to sample_x
sample_x = [0 for i in range(len(self.key_order))]
......@@ -271,10 +270,12 @@ class MetisTuner(Tuner):
minimize_constraints_fun=minimize_constraints_fun)
if not lm_current:
return None
logger.info({'hyperparameter': lm_current['hyperparameter'],
'expected_mu': lm_current['expected_mu'],
'expected_sigma': lm_current['expected_sigma'],
'reason': "exploitation_gp"})
logger.info({
'hyperparameter': lm_current['hyperparameter'],
'expected_mu': lm_current['expected_mu'],
'expected_sigma': lm_current['expected_sigma'],
'reason': "exploitation_gp"
})
if no_candidates is False:
# ===== STEP 2: Get recommended configurations for exploration =====
......@@ -289,10 +290,12 @@ class MetisTuner(Tuner):
if results_exploration is not None:
if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
temp_candidate = {'hyperparameter': results_exploration['hyperparameter'],
'expected_mu': results_exploration['expected_mu'],
'expected_sigma': results_exploration['expected_sigma'],
'reason': "exploration"}
temp_candidate = {
'hyperparameter': results_exploration['hyperparameter'],
'expected_mu': results_exploration['expected_mu'],
'expected_sigma': results_exploration['expected_sigma'],
'reason': "exploration"
}
candidates.append(temp_candidate)
logger.info("DEBUG: 1 exploration candidate selected\n")
......@@ -322,11 +325,14 @@ class MetisTuner(Tuner):
if results_exploitation is not None:
if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
temp_expected_mu, temp_expected_sigma = gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model'])
temp_candidate = {'hyperparameter': results_exploitation['hyperparameter'],
'expected_mu': temp_expected_mu,
'expected_sigma': temp_expected_sigma,
'reason': "exploitation_gmm"}
temp_expected_mu, temp_expected_sigma = \
gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model'])
temp_candidate = {
'hyperparameter': results_exploitation['hyperparameter'],
'expected_mu': temp_expected_mu,
'expected_sigma': temp_expected_sigma,
'reason': "exploitation_gmm"
}
candidates.append(temp_candidate)
logger.info("DEBUG: 1 exploitation_gmm candidate selected\n")
......@@ -349,7 +355,7 @@ class MetisTuner(Tuner):
results_outliers = gp_outlier_detection.outlierDetection_threaded(samples_x, samples_y_aggregation)
if results_outliers is not None:
for results_outlier in results_outliers:
for results_outlier in results_outliers: # pylint: disable=not-an-iterable
if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
temp_candidate = {'hyperparameter': samples_x[results_outlier['samples_idx']],\
'expected_mu': results_outlier['expected_mu'],\
......@@ -398,10 +404,12 @@ class MetisTuner(Tuner):
next_candidate = {'hyperparameter': next_candidate, 'reason': "random",
'expected_mu': expected_mu, 'expected_sigma': expected_sigma}
# ===== STEP 7: If current optimal hyperparameter occurs in the history or exploration probability is less than the threshold, take next config as exploration step =====
# ===== STEP 7 =====
# If current optimal hyperparameter occurs in the history or exploration probability is less than the threshold,
# take next config as exploration step
outputs = self._pack_output(lm_current['hyperparameter'])
ap = random.uniform(0, 1)
if outputs in self.total_data or ap<=self.exploration_probability:
if outputs in self.total_data or ap <= self.exploration_probability:
if next_candidate is not None:
outputs = self._pack_output(next_candidate['hyperparameter'])
else:
......@@ -419,14 +427,14 @@ class MetisTuner(Tuner):
"""
_completed_num = 0
for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data)))
logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
_completed_num += 1
assert "parameter" in trial_info
_params = trial_info["parameter"]
assert "value" in trial_info
_value = trial_info['value']
if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value)
logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue
self.supplement_data_num += 1
_parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
......
......@@ -61,7 +61,7 @@ _customized_parameter_ids = set()
def _create_parameter_id():
global _next_parameter_id # pylint: disable=global-statement
global _next_parameter_id
_next_parameter_id += 1
return _next_parameter_id - 1
......@@ -106,15 +106,15 @@ class MsgDispatcher(MsgDispatcherBase):
self.tuner.update_search_space(data)
send(CommandType.Initialized, '')
def send_trial_callback(self, id, params):
def send_trial_callback(self, id_, params):
"""For tuner to issue trial config when the config is generated
"""
send(CommandType.NewTrialJob, _pack_parameter(id, params))
send(CommandType.NewTrialJob, _pack_parameter(id_, params))
def handle_request_trial_jobs(self, data):
# data: number or trial jobs
ids = [_create_parameter_id() for _ in range(data)]
_logger.debug("requesting for generating params of {}".format(ids))
_logger.debug("requesting for generating params of %s", ids)
params_list = self.tuner.generate_multiple_parameters(ids, st_callback=self.send_trial_callback)
for i, _ in enumerate(params_list):
......@@ -218,7 +218,8 @@ class MsgDispatcher(MsgDispatcherBase):
try:
result = self.assessor.assess_trial(trial_job_id, ordered_history)
except Exception as e:
_logger.exception('Assessor error')
_logger.error('Assessor error')
_logger.exception(e)
if isinstance(result, bool):
result = AssessResult.Good if result else AssessResult.Bad
......
......@@ -18,7 +18,6 @@
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
import os
import threading
import logging
from multiprocessing.dummy import Pool as ThreadPool
......@@ -146,7 +145,7 @@ class MsgDispatcherBase(Recoverable):
pass
def process_command(self, command, data):
_logger.debug('process_command: command: [{}], data: [{}]'.format(command, data))
_logger.debug('process_command: command: [%s], data: [%s]', command, data)
command_handlers = {
# Tuner commands:
......
......@@ -25,6 +25,11 @@ from . import trial
_logger = logging.getLogger(__name__)
_MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer"
_namespace = {}
_tf_variables = {}
_arch_logits_list = []
_optimizer = None
_train_op = None
def classic_mode(
......@@ -64,47 +69,40 @@ def enas_mode(
it can be known which inputs should be masked and which op should be executed.'''
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# store namespace
if 'name_space' not in globals():
global name_space
name_space = dict()
name_space[mutable_id] = True
name_space[name_prefix] = dict()
name_space[name_prefix]['funcs'] = list(funcs)
name_space[name_prefix]['optional_inputs'] = list(optional_inputs)
_namespace[mutable_id] = True
_namespace[name_prefix] = dict()
_namespace[name_prefix]['funcs'] = list(funcs)
_namespace[name_prefix]['optional_inputs'] = list(optional_inputs)
# create tensorflow variables as 1/0 signals used to form subgraph
if 'tf_variables' not in globals():
global tf_variables
tf_variables = dict()
name_for_optional_inputs = name_prefix + '_optional_inputs'
name_for_funcs = name_prefix + '_funcs'
tf_variables[name_prefix] = dict()
tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(name_for_optional_inputs,
[len(
optional_inputs)],
dtype=tf.bool,
trainable=False)
tf_variables[name_prefix]['funcs'] = tf.get_variable(
_tf_variables[name_prefix] = dict()
_tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
name_for_optional_inputs,
[len(optional_inputs)],
dtype=tf.bool,
trainable=False
)
_tf_variables[name_prefix]['funcs'] = tf.get_variable(
name_for_funcs, [], dtype=tf.int64, trainable=False)
# get real values using their variable names
real_optional_inputs_value = [optional_inputs[name]
for name in name_space[name_prefix]['optional_inputs']]
for name in _namespace[name_prefix]['optional_inputs']]
real_func_value = [funcs[name]
for name in name_space[name_prefix]['funcs']]
for name in _namespace[name_prefix]['funcs']]
real_funcs_args = [funcs_args[name]
for name in name_space[name_prefix]['funcs']]
for name in _namespace[name_prefix]['funcs']]
# build tensorflow graph of getting chosen inputs by masking
real_chosen_inputs = tf.boolean_mask(
real_optional_inputs_value, tf_variables[name_prefix]['optional_inputs'])
real_optional_inputs_value, _tf_variables[name_prefix]['optional_inputs'])
# build tensorflow graph of different branches by using tf.case
branches = dict()
func_output = None
for func_id in range(len(funcs)):
func_output = real_func_value[func_id](
[fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
branches[tf.equal(tf_variables[name_prefix]['funcs'],
func_id)] = lambda: func_output
layer_out = tf.case(branches, exclusive=True,
default=lambda: func_output)
func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
branches[tf.equal(_tf_variables[name_prefix]['funcs'], func_id)] = lambda: func_output
layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)
return layer_out
......@@ -157,12 +155,9 @@ def darts_mode(
layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
for func_name, func in funcs.items()]
# Create architecture weights for every func(op)
var_name = "{}_{}_".format(mutable_id, mutable_layer_id, "arch_weights")
if 'arch_logits_list' not in globals():
global arch_logits_list
arch_logits_list = list()
arch_logits = tf.get_variable(var_name, shape=[len[funcs]], trainable=False)
arch_logits_list.append(arch_logits)
var_name = "{}_{}_arch_weights".format(mutable_id, mutable_layer_id)
arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
_arch_logits_list.append(arch_logits)
arch_weights = tf.nn.softmax(arch_logits)
layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
......@@ -186,19 +181,19 @@ def reload_tensorflow_variables(tf, session):
mutable_layers.add((mutable_id, mutable_layer_id))
mutable_layers = sorted(list(mutable_layers))
for mutable_id, mutable_layer_id in mutable_layers:
if mutable_id not in name_space:
_logger.warning("{} not found in name space".format(mutable_id))
if mutable_id not in _namespace:
_logger.warning("%s not found in name space", mutable_id)
continue
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# get optional inputs names
optional_inputs = name_space[name_prefix]['optional_inputs']
optional_inputs = _namespace[name_prefix]['optional_inputs']
# extract layer information from the subgraph sampled by tuner
chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs)
chosen_layer = name_space[name_prefix]['funcs'].index(chosen_layer)
chosen_layer = _namespace[name_prefix]['funcs'].index(chosen_layer)
chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs]
# load these information into pre-defined tensorflow variables
tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
tf_variables[name_prefix]['optional_inputs'].load(
_tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
_tf_variables[name_prefix]['optional_inputs'].load(
chosen_inputs, session)
......@@ -218,15 +213,13 @@ def _decompose_general_key(key):
def darts_training(tf, session, loss, feed_dict):
if 'optimizer' not in globals():
global arch_logits_list
global optimizer
global train_op
optimizer = tf.MomentumOptimizer(learning_rate=0.025)
global _optimizer, _train_op
if _optimizer is None:
_optimizer = tf.MomentumOptimizer(learning_rate=0.025)
# TODO: Calculate loss
grads_and_vars = optimizer.compute_gradients(loss, arch_logits_list)
train_op = optimizer.apply_gradients(grads_and_vars)
session.run(train_op)
grads_and_vars = _optimizer.compute_gradients(loss, _arch_logits_list)
_train_op = _optimizer.apply_gradients(grads_and_vars)
session.run(_train_op)
def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
......@@ -258,12 +251,11 @@ def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inpu
chosen_inputs = []
# make sure dict -> list produce stable result by sorting
optional_inputs_keys = sorted(optional_inputs)
for i in range(optional_input_size):
for _ in range(optional_input_size):
chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)])
optional_input_state //= len(optional_inputs)
_logger.info("%s_%s: layer: %s, optional inputs: %s" % (mutable_id, mutable_layer_id,
chosen_layer, chosen_inputs))
_logger.info("%s_%s: layer: %s, optional inputs: %s", mutable_id, mutable_layer_id, chosen_layer, chosen_inputs)
return chosen_layer, chosen_inputs
......@@ -278,12 +270,12 @@ def convert_nas_search_space(search_space):
if "_type" not in v:
# this should not happen
_logger.warning("There is no _type in one of your search space values with key '%s'"
". Please check your search space" % k)
". Please check your search space", k)
ret[k] = v
elif v["_type"] != "mutable_layer":
ret[k] = v
else:
_logger.info("Converting mutable_layer search space with key '%s'" % k)
_logger.info("Converting mutable_layer search space with key '%s'", k)
# v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
values = v["_value"]
for layer_name, layer_data in values.items():
......@@ -305,13 +297,13 @@ def convert_nas_search_space(search_space):
_logger.error("Might not be able to handle optional_input_size < 0, please double check")
input_size[1] += 1
else:
_logger.info("Optional input choices are set to empty by default in %s" % layer_key)
_logger.info("Optional input choices are set to empty by default in %s", layer_key)
input_size = [0, 1]
if layer_data.get("optional_inputs"):
total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1)
else:
_logger.info("Optional inputs not found in %s" % layer_key)
_logger.info("Optional inputs not found in %s", layer_key)
total_state_size = 1
converted = {
......
......@@ -19,7 +19,7 @@
# ==================================================================================================
import json
from collections import Iterable
from collections.abc import Iterable
from copy import deepcopy, copy
from queue import Queue
......@@ -653,7 +653,7 @@ class Graph:
return JSONModel(self).data
@classmethod
def parsing_json_model(self, json_model):
def parsing_json_model(cls, json_model):
'''build a graph from json
'''
return json_to_graph(json_model)
......@@ -910,7 +910,6 @@ def graph_to_onnx(graph, onnx_model_path):
def onnx_to_graph(onnx_model, input_shape):
import onnx
# to do in the future using onnx ir
graph = Graph(input_shape, False)
graph.parsing_onnx_model(onnx_model)
......
......@@ -124,7 +124,7 @@ def wider_pre_conv(layer, n_add_filters, weighted=True):
student_w = teacher_w.copy()
student_b = teacher_b.copy()
# target layer update (i)
for i in range(len(rand)):
for i, _ in enumerate(rand):
teacher_index = rand[i]
new_weight = teacher_w[teacher_index, ...]
new_weight = new_weight[np.newaxis, ...]
......
......@@ -19,7 +19,7 @@
# ==================================================================================================
from abc import abstractmethod
from collections import Iterable
from collections.abc import Iterable
import torch
from torch import nn
......@@ -76,7 +76,6 @@ class StubLayer:
def build(self, shape):
'''build shape.
'''
pass
def set_weights(self, weights):
'''set weights.
......@@ -86,22 +85,18 @@ class StubLayer:
def import_weights(self, torch_layer):
'''import weights.
'''
pass
def import_weights_keras(self, keras_layer):
'''import weights from keras layer.
'''
pass
def export_weights(self, torch_layer):
'''export weights.
'''
pass
def export_weights_keras(self, keras_layer):
'''export weights to keras layer.
'''
pass
def get_weights(self):
'''get weights.
......@@ -122,7 +117,6 @@ class StubLayer:
def to_real_layer(self):
'''to real layer.
'''
pass
def __str__(self):
'''str() function to print.
......@@ -576,6 +570,7 @@ def to_real_keras_layer(layer):
return layers.Flatten()
if is_layer(layer, "GlobalAveragePooling"):
return layers.GlobalAveragePooling2D()
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
def is_layer(layer, layer_type):
......@@ -608,6 +603,7 @@ def is_layer(layer, layer_type):
return isinstance(layer, (StubFlatten,))
elif layer_type == "GlobalAveragePooling":
return isinstance(layer, StubGlobalPooling)
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
def layer_description_extractor(layer, node_to_id):
......@@ -664,7 +660,6 @@ def layer_description_extractor(layer, node_to_id):
def layer_description_builder(layer_information, id_to_node):
'''build layer from description.
'''
# pylint: disable=W0123
layer_type = layer_information[0]
layer_input_ids = layer_information[1]
......@@ -678,26 +673,26 @@ def layer_description_builder(layer_information, id_to_node):
filters = layer_information[4]
kernel_size = layer_information[5]
stride = layer_information[6]
return eval(layer_type)(
return globals()[layer_type](
input_channel, filters, kernel_size, stride, layer_input, layer_output
)
elif layer_type.startswith("StubDense"):
input_units = layer_information[3]
units = layer_information[4]
return eval(layer_type)(input_units, units, layer_input, layer_output)
return globals()[layer_type](input_units, units, layer_input, layer_output)
elif layer_type.startswith("StubBatchNormalization"):
num_features = layer_information[3]
return eval(layer_type)(num_features, layer_input, layer_output)
return globals()[layer_type](num_features, layer_input, layer_output)
elif layer_type.startswith("StubDropout"):
rate = layer_information[3]
return eval(layer_type)(rate, layer_input, layer_output)
return globals()[layer_type](rate, layer_input, layer_output)
elif layer_type.startswith("StubPooling"):
kernel_size = layer_information[3]
stride = layer_information[4]
padding = layer_information[5]
return eval(layer_type)(kernel_size, stride, padding, layer_input, layer_output)
return globals()[layer_type](kernel_size, stride, padding, layer_input, layer_output)
else:
return eval(layer_type)(layer_input, layer_output)
return globals()[layer_type](layer_input, layer_output)
def layer_width(layer):
......
......@@ -310,4 +310,3 @@ class NetworkMorphismTuner(Tuner):
def import_data(self, data):
pass
......@@ -19,8 +19,6 @@
# ==================================================================================================
# pylint: disable=wildcard-import
from ..env_vars import trial_env_vars
if trial_env_vars.NNI_PLATFORM is None:
......
......@@ -39,8 +39,8 @@ if not os.path.exists(_outputdir):
_nni_platform = trial_env_vars.NNI_PLATFORM
if _nni_platform == 'local':
_log_file_path = os.path.join(_outputdir, 'trial.log')
init_logger(_log_file_path)
_log_file_path = os.path.join(_outputdir, 'trial.log')
init_logger(_log_file_path)
_multiphase = trial_env_vars.MULTI_PHASE
......@@ -58,7 +58,7 @@ def request_next_parameter():
def get_next_parameter():
global _param_index
params_file_name = ''
if _multiphase and (_multiphase == 'true' or _multiphase == 'True'):
if _multiphase in ('true', 'True'):
params_file_name = ('parameter_{}.cfg'.format(_param_index), 'parameter.cfg')[_param_index == 0]
else:
if _param_index > 0:
......@@ -92,7 +92,7 @@ def send_metric(string):
file = open(_metric_file.name)
file.close()
else:
subprocess.run(['touch', _metric_file.name], check = True)
subprocess.run(['touch', _metric_file.name], check=True)
def get_experiment_id():
return trial_env_vars.NNI_EXP_ID
......