Unverified Commit 7c4b8c0d authored by liuzhe-lz, committed by GitHub

Make pylint happy (#1649)

Update the Python SDK and nni_annotation to pass pylint rules
parent 22316800
...@@ -139,7 +139,7 @@ class TargetSpace(): ...@@ -139,7 +139,7 @@ class TargetSpace():
except AssertionError: except AssertionError:
raise ValueError( raise ValueError(
"Size of array ({}) is different than the ".format(len(x)) + "Size of array ({}) is different than the ".format(len(x)) +
"expected number of parameters ({}).".format(self.dim()) "expected number of parameters ({}).".format(self.dim)
) )
params = {} params = {}
......
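The right-hand side drops the call parentheses on `self.dim`, suggesting `dim` is a `@property` whose result an accidental call would break. A minimal sketch of the pattern, with a simplified stand-in for the real `TargetSpace`:

```python
# Simplified stand-in, not the real TargetSpace: dim is a read-only property.
class TargetSpace:
    def __init__(self, bounds):
        self._bounds = bounds

    @property
    def dim(self):
        # Property access returns an int, so `space.dim()` would raise
        # TypeError: 'int' object is not callable.
        return len(self._bounds)

space = TargetSpace([(0, 1), (0, 10)])
assert space.dim == 2
```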
...@@ -37,8 +37,8 @@ def _match_val_type(vals, bounds): ...@@ -37,8 +37,8 @@ def _match_val_type(vals, bounds):
_type = bound['_type'] _type = bound['_type']
if _type == "choice": if _type == "choice":
# Find the closest integer in the array, vals_bounds # Find the closest integer in the array, vals_bounds
vals_new.append( # pylint: disable=cell-var-from-loop
min(bound['_value'], key=lambda x: abs(x - vals[i]))) vals_new.append(min(bound['_value'], key=lambda x: abs(x - vals[i])))
elif _type in ['quniform', 'randint']: elif _type in ['quniform', 'randint']:
vals_new.append(np.around(vals[i])) vals_new.append(np.around(vals[i]))
else: else:
......
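The hunk above only moves the `# pylint: disable=cell-var-from-loop` comment; the warning itself exists because a closure created in a loop captures the loop variable, not its current value. It is harmless here since `min` consumes the lambda immediately, but the general hazard, and the usual default-argument fix, look like this:

```python
# Closures created in a loop all share the same loop variable...
fns = [lambda x: x + i for i in range(3)]
print([f(10) for f in fns])        # [12, 12, 12] -- every lambda sees the final i

# ...binding it as a default argument freezes the value per iteration.
fns = [lambda x, i=i: x + i for i in range(3)]
print([f(10) for f in fns])        # [10, 11, 12]
```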
...@@ -23,8 +23,8 @@ gridsearch_tuner.py including: ...@@ -23,8 +23,8 @@ gridsearch_tuner.py including:
''' '''
import copy import copy
import numpy as np
import logging import logging
import numpy as np
import nni import nni
from nni.tuner import Tuner from nni.tuner import Tuner
...@@ -44,7 +44,8 @@ class GridSearchTuner(Tuner): ...@@ -44,7 +44,8 @@ class GridSearchTuner(Tuner):
Type 'choice' will select one of the options. Note that it can also be nested. Type 'choice' will select one of the options. Note that it can also be nested.
Type 'quniform' will receive three values [low, high, q], where [low, high] specifies a range and 'q' specifies the interval Type 'quniform' will receive three values [low, high, q], where [low, high] specifies a range and 'q' specifies the interval
It will be sampled in a way that the first sampled value is 'low', and each of the following values is 'interval' larger than the value in front of it. It will be sampled in a way that the first sampled value is 'low',
and each of the following values is 'interval' larger than the value in front of it.
Type 'randint' gives all possible integers in range [low, high). Note that 'high' is not included. Type 'randint' gives all possible integers in range [low, high). Note that 'high' is not included.
''' '''
...@@ -132,7 +133,7 @@ class GridSearchTuner(Tuner): ...@@ -132,7 +133,7 @@ class GridSearchTuner(Tuner):
def generate_parameters(self, parameter_id, **kwargs): def generate_parameters(self, parameter_id, **kwargs):
self.count += 1 self.count += 1
while (self.count <= len(self.expanded_search_space)-1): while self.count <= len(self.expanded_search_space) - 1:
_params_tuple = convert_dict2tuple(self.expanded_search_space[self.count]) _params_tuple = convert_dict2tuple(self.expanded_search_space[self.count])
if _params_tuple in self.supplement_data: if _params_tuple in self.supplement_data:
self.count += 1 self.count += 1
...@@ -153,14 +154,14 @@ class GridSearchTuner(Tuner): ...@@ -153,14 +154,14 @@ class GridSearchTuner(Tuner):
""" """
_completed_num = 0 _completed_num = 0
for trial_info in data: for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data))) logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
_completed_num += 1 _completed_num += 1
assert "parameter" in trial_info assert "parameter" in trial_info
_params = trial_info["parameter"] _params = trial_info["parameter"]
assert "value" in trial_info assert "value" in trial_info
_value = trial_info['value'] _value = trial_info['value']
if not _value: if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value) logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue continue
_params_tuple = convert_dict2tuple(_params) _params_tuple = convert_dict2tuple(_params)
self.supplement_data[_params_tuple] = True self.supplement_data[_params_tuple] = True
......
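Several hunks in this commit, including the two `logger.info` changes above, switch from `%`-interpolating the message at the call site to passing the arguments to the logging method. Formatting is then deferred until a handler actually emits the record, which is what pylint's `logging-not-lazy` family of checks asks for:

```python
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

done, total = 3, 10
logger.info("progress %s / %s" % (done, total))  # eager: string built, then discarded
logger.info("progress %s / %s", done, total)     # lazy: skipped entirely below WARNING
```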
...@@ -32,7 +32,7 @@ from nni.common import multi_phase_enabled ...@@ -32,7 +32,7 @@ from nni.common import multi_phase_enabled
from nni.msg_dispatcher_base import MsgDispatcherBase from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.protocol import CommandType, send from nni.protocol import CommandType, send
from nni.utils import NodeType, OptimizeMode, MetricType, extract_scalar_reward from nni.utils import NodeType, OptimizeMode, MetricType, extract_scalar_reward
import nni.parameter_expressions as parameter_expressions from nni import parameter_expressions
_logger = logging.getLogger(__name__) _logger = logging.getLogger(__name__)
...@@ -49,7 +49,7 @@ def create_parameter_id(): ...@@ -49,7 +49,7 @@ def create_parameter_id():
int int
parameter id parameter id
""" """
global _next_parameter_id # pylint: disable=global-statement global _next_parameter_id
_next_parameter_id += 1 _next_parameter_id += 1
return _next_parameter_id - 1 return _next_parameter_id - 1
...@@ -102,8 +102,7 @@ def json2parameter(ss_spec, random_state): ...@@ -102,8 +102,7 @@ def json2parameter(ss_spec, random_state):
_index = random_state.randint(len(_value)) _index = random_state.randint(len(_value))
chosen_params = json2parameter(ss_spec[NodeType.VALUE][_index], random_state) chosen_params = json2parameter(ss_spec[NodeType.VALUE][_index], random_state)
else: else:
chosen_params = eval('parameter_expressions.' + # pylint: disable=eval-used chosen_params = getattr(parameter_expressions, _type)(*(_value + [random_state]))
_type)(*(_value + [random_state]))
else: else:
chosen_params = dict() chosen_params = dict()
for key in ss_spec.keys(): for key in ss_spec.keys():
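Replacing `eval('parameter_expressions.' + _type)` with `getattr(parameter_expressions, _type)` removes the `eval-used` disable: the attribute lookup resolves a function by name without evaluating arbitrary code, and an unknown `_type` fails with `AttributeError` instead of executing. A sketch of the same dispatch against a stand-in module:

```python
import math as params  # stand-in module; the commit uses nni.parameter_expressions

def dispatch(func_name, *args):
    # Resolve the function by name; no code is evaluated, and unknown
    # names raise AttributeError rather than running as an expression.
    return getattr(params, func_name)(*args)

print(dispatch("sqrt", 9.0))  # 3.0
```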
...@@ -140,8 +139,8 @@ class Bracket(): ...@@ -140,8 +139,8 @@ class Bracket():
self.bracket_id = s self.bracket_id = s
self.s_max = s_max self.s_max = s_max
self.eta = eta self.eta = eta
self.n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon) # pylint: disable=invalid-name self.n = math.ceil((s_max + 1) * (eta ** s) / (s + 1) - _epsilon)
self.r = R / eta ** s # pylint: disable=invalid-name self.r = R / eta ** s
self.i = 0 self.i = 0
self.hyper_configs = [] # [ {id: params}, {}, ... ] self.hyper_configs = [] # [ {id: params}, {}, ... ]
self.configs_perf = [] # [ {id: [seq, acc]}, {}, ... ] self.configs_perf = [] # [ {id: [seq, acc]}, {}, ... ]
...@@ -197,7 +196,7 @@ class Bracket(): ...@@ -197,7 +196,7 @@ class Bracket():
i: int i: int
the ith round the ith round
""" """
global _KEY # pylint: disable=global-statement global _KEY
self.num_finished_configs[i] += 1 self.num_finished_configs[i] += 1
_logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d', self.bracket_id, self.i, i, _logger.debug('bracket id: %d, round: %d %d, finished: %d, all: %d', self.bracket_id, self.i, i,
self.num_finished_configs[i], self.num_configs_to_run[i]) self.num_finished_configs[i], self.num_configs_to_run[i])
...@@ -226,7 +225,7 @@ class Bracket(): ...@@ -226,7 +225,7 @@ class Bracket():
return [[key, value] for key, value in hyper_configs.items()] return [[key, value] for key, value in hyper_configs.items()]
return None return None
def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state): # pylint: disable=invalid-name def get_hyperparameter_configurations(self, num, r, searchspace_json, random_state):
"""Randomly generate num hyperparameter configurations from search space """Randomly generate num hyperparameter configurations from search space
Parameters Parameters
...@@ -239,7 +238,7 @@ class Bracket(): ...@@ -239,7 +238,7 @@ class Bracket():
list list
a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...] a list of hyperparameter configurations. Format: [[key1, value1], [key2, value2], ...]
""" """
global _KEY # pylint: disable=global-statement global _KEY
assert self.i == 0 assert self.i == 0
hyperparameter_configs = dict() hyperparameter_configs = dict()
for _ in range(num): for _ in range(num):
...@@ -285,7 +284,7 @@ class Hyperband(MsgDispatcherBase): ...@@ -285,7 +284,7 @@ class Hyperband(MsgDispatcherBase):
def __init__(self, R=60, eta=3, optimize_mode='maximize'): def __init__(self, R=60, eta=3, optimize_mode='maximize'):
"""B = (s_max + 1)R""" """B = (s_max + 1)R"""
super(Hyperband, self).__init__() super(Hyperband, self).__init__()
self.R = R # pylint: disable=invalid-name self.R = R
self.eta = eta self.eta = eta
self.brackets = dict() # dict of Bracket self.brackets = dict() # dict of Bracket
self.generated_hyper_configs = [] # all the configs waiting for run self.generated_hyper_configs = [] # all the configs waiting for run
......
...@@ -51,13 +51,13 @@ def json2space(in_x, name=NodeType.ROOT): ...@@ -51,13 +51,13 @@ def json2space(in_x, name=NodeType.ROOT):
name = name + '-' + _type name = name + '-' + _type
_value = json2space(in_x[NodeType.VALUE], name=name) _value = json2space(in_x[NodeType.VALUE], name=name)
if _type == 'choice': if _type == 'choice':
out_y = eval('hp.hp.choice')(name, _value) out_y = hp.hp.choice(name, _value)
elif _type == 'randint': elif _type == 'randint':
out_y = hp.hp.randint(name, _value[1] - _value[0]) out_y = hp.hp.randint(name, _value[1] - _value[0])
else: else:
if _type in ['loguniform', 'qloguniform']: if _type in ['loguniform', 'qloguniform']:
_value[:2] = np.log(_value[:2]) _value[:2] = np.log(_value[:2])
out_y = eval('hp.hp.' + _type)(name, *_value) out_y = getattr(hp.hp, _type)(name, *_value)
else: else:
out_y = dict() out_y = dict()
for key in in_x.keys(): for key in in_x.keys():
...@@ -191,6 +191,7 @@ def _add_index(in_x, parameter): ...@@ -191,6 +191,7 @@ def _add_index(in_x, parameter):
return {NodeType.INDEX: pos, NodeType.VALUE: item} return {NodeType.INDEX: pos, NodeType.VALUE: item}
else: else:
return parameter return parameter
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
class HyperoptTuner(Tuner): class HyperoptTuner(Tuner):
...@@ -409,8 +410,8 @@ class HyperoptTuner(Tuner): ...@@ -409,8 +410,8 @@ class HyperoptTuner(Tuner):
misc_by_id = {m['tid']: m for m in miscs} misc_by_id = {m['tid']: m for m in miscs}
for m in miscs: for m in miscs:
m['idxs'] = dict([(key, []) for key in idxs]) m['idxs'] = {key: [] for key in idxs}
m['vals'] = dict([(key, []) for key in idxs]) m['vals'] = {key: [] for key in idxs}
for key in idxs: for key in idxs:
assert len(idxs[key]) == len(vals[key]) assert len(idxs[key]) == len(vals[key])
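`dict([(key, []) for key in idxs])` builds a throwaway list of tuples before the dict; the comprehension form above is equivalent and is what pylint's `consider-using-dict-comprehension` check suggests:

```python
idxs = ["lr", "momentum"]

old = dict([(key, []) for key in idxs])  # intermediate list of (key, value) tuples
new = {key: [] for key in idxs}          # same mapping, built directly
assert old == new == {"lr": [], "momentum": []}
```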
...@@ -433,7 +434,7 @@ class HyperoptTuner(Tuner): ...@@ -433,7 +434,7 @@ class HyperoptTuner(Tuner):
total_params : dict total_params : dict
parameter suggestion parameter suggestion
""" """
if self.parallel and len(self.total_data)>20 and len(self.running_data) and self.optimal_y is not None: if self.parallel and len(self.total_data) > 20 and self.running_data and self.optimal_y is not None:
self.CL_rval = copy.deepcopy(self.rval) self.CL_rval = copy.deepcopy(self.rval)
if self.constant_liar_type == 'mean': if self.constant_liar_type == 'mean':
_constant_liar_y = self.optimal_y[0] / self.optimal_y[1] _constant_liar_y = self.optimal_y[0] / self.optimal_y[1]
...@@ -481,8 +482,7 @@ class HyperoptTuner(Tuner): ...@@ -481,8 +482,7 @@ class HyperoptTuner(Tuner):
""" """
_completed_num = 0 _completed_num = 0
for trial_info in data: for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" % logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
(_completed_num, len(data)))
_completed_num += 1 _completed_num += 1
if self.algorithm_name == 'random_search': if self.algorithm_name == 'random_search':
return return
...@@ -491,9 +491,7 @@ class HyperoptTuner(Tuner): ...@@ -491,9 +491,7 @@ class HyperoptTuner(Tuner):
assert "value" in trial_info assert "value" in trial_info
_value = trial_info['value'] _value = trial_info['value']
if not _value: if not _value:
logger.info( logger.info("Useless trial data, value is %s, skip this trial data.", _value)
"Useless trial data, value is %s, skip this trial data." %
_value)
continue continue
self.supplement_data_num += 1 self.supplement_data_num += 1
_parameter_id = '_'.join( _parameter_id = '_'.join(
......
...@@ -42,7 +42,7 @@ class MedianstopAssessor(Assessor): ...@@ -42,7 +42,7 @@ class MedianstopAssessor(Assessor):
self.high_better = False self.high_better = False
else: else:
self.high_better = True self.high_better = True
logger.warning('unrecognized optimize_mode', optimize_mode) logger.warning('unrecognized optimize_mode %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history): def _update_data(self, trial_job_id, trial_history):
"""update data """update data
...@@ -121,10 +121,10 @@ class MedianstopAssessor(Assessor): ...@@ -121,10 +121,10 @@ class MedianstopAssessor(Assessor):
best_history = min(trial_history) best_history = min(trial_history)
avg_array = [] avg_array = []
for id in self.completed_avg_history: for id_ in self.completed_avg_history:
if len(self.completed_avg_history[id]) >= curr_step: if len(self.completed_avg_history[id_]) >= curr_step:
avg_array.append(self.completed_avg_history[id][curr_step - 1]) avg_array.append(self.completed_avg_history[id_][curr_step - 1])
if len(avg_array) > 0: if avg_array:
avg_array.sort() avg_array.sort()
if self.high_better: if self.high_better:
median = avg_array[(len(avg_array)-1) // 2] median = avg_array[(len(avg_array)-1) // 2]
......
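The medianstop hunk fixes two more checks: the loop variable `id` shadowed the built-in `id()` (`redefined-builtin`), and `len(avg_array) > 0` became a plain truthiness test (`len-as-condition`), since empty sequences are falsy:

```python
for id_ in range(3):   # renamed from `id`; that name would shadow the builtin id()
    pass
assert callable(id)    # the builtin is untouched

avg_array = [0.4, 0.2]
if avg_array:          # idiomatic; equivalent to len(avg_array) > 0
    avg_array.sort()
```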
...@@ -22,7 +22,6 @@ import random ...@@ -22,7 +22,6 @@ import random
from .medianstop_assessor import MedianstopAssessor from .medianstop_assessor import MedianstopAssessor
from nni.assessor import AssessResult from nni.assessor import AssessResult
logger = logging.getLogger('nni.contrib.medianstop_assessor') logger = logging.getLogger('nni.contrib.medianstop_assessor')
logger.debug('START') logger.debug('START')
......
...@@ -24,7 +24,6 @@ import sys ...@@ -24,7 +24,6 @@ import sys
import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.lib_data as lib_data
sys.path.insert(1, os.path.join(sys.path[0], '..')) sys.path.insert(1, os.path.join(sys.path[0], '..'))
......
...@@ -19,12 +19,12 @@ ...@@ -19,12 +19,12 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse, json, os, sys import os
import sys
from multiprocessing.dummy import Pool as ThreadPool from multiprocessing.dummy import Pool as ThreadPool
import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
import nni.metis_tuner.lib_data as lib_data
sys.path.insert(1, os.path.join(sys.path[0], '..')) sys.path.insert(1, os.path.join(sys.path[0], '..'))
...@@ -71,14 +71,15 @@ def outlierDetection_threaded(samples_x, samples_y_aggregation): ...@@ -71,14 +71,15 @@ def outlierDetection_threaded(samples_x, samples_y_aggregation):
else: else:
print("error here.") print("error here.")
outliers = None if len(outliers) == 0 else outliers outliers = outliers if outliers else None
return outliers return outliers
def outlierDetection(samples_x, samples_y_aggregation): def outlierDetection(samples_x, samples_y_aggregation):
''' '''
TODO
''' '''
outliers = [] outliers = []
for samples_idx in range(0, len(samples_x)): for samples_idx, _ in enumerate(samples_x):
#sys.stderr.write("[%s] DEBUG: Evaluating %d of %d samples\n" #sys.stderr.write("[%s] DEBUG: Evaluating %d of %d samples\n"
# \ % (os.path.basename(__file__), samples_idx + 1, len(samples_x))) # \ % (os.path.basename(__file__), samples_idx + 1, len(samples_x)))
diagnostic_regressor_gp = gp_create_model.create_model(\ diagnostic_regressor_gp = gp_create_model.create_model(\
...@@ -93,5 +94,5 @@ def outlierDetection(samples_x, samples_y_aggregation): ...@@ -93,5 +94,5 @@ def outlierDetection(samples_x, samples_y_aggregation):
"expected_sigma": sigma, "expected_sigma": sigma,
"difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)}) "difference": abs(samples_y_aggregation[samples_idx] - mu) - (2.33 * sigma)})
outliers = None if len(outliers) == 0 else outliers outliers = outliers if outliers else None
return outliers return outliers
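Two idioms from this hunk: `enumerate` replaces `range(0, len(samples_x))` indexing (`consider-using-enumerate`), and the `outliers` normalization reads the list's truthiness directly:

```python
samples_x = [[0.1], [0.9], [0.5]]
for samples_idx, _ in enumerate(samples_x):  # index needed, item unused here
    print("evaluating sample", samples_idx)

outliers = []
outliers = outliers if outliers else None    # empty list collapses to None
assert outliers is None
```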
...@@ -31,6 +31,7 @@ def match_val_type(vals, vals_bounds, vals_types): ...@@ -31,6 +31,7 @@ def match_val_type(vals, vals_bounds, vals_types):
for i, _ in enumerate(vals_types): for i, _ in enumerate(vals_types):
if vals_types[i] == "discrete_int": if vals_types[i] == "discrete_int":
# Find the closest integer in the array, vals_bounds # Find the closest integer in the array, vals_bounds
# pylint: disable=cell-var-from-loop
vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i]))) vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i])))
elif vals_types[i] == "range_int": elif vals_types[i] == "range_int":
# Round down to the nearest integer # Round down to the nearest integer
...@@ -64,4 +65,3 @@ def rand(x_bounds, x_types): ...@@ -64,4 +65,3 @@ def rand(x_bounds, x_types):
return None return None
return outputs return outputs
\ No newline at end of file
...@@ -20,14 +20,11 @@ ...@@ -20,14 +20,11 @@
import copy import copy
import logging import logging
import numpy as np
import os
import random import random
import statistics import statistics
import sys
import warnings import warnings
from enum import Enum, unique
from multiprocessing.dummy import Pool as ThreadPool from multiprocessing.dummy import Pool as ThreadPool
import numpy as np
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.lib_data as lib_data import nni.metis_tuner.lib_data as lib_data
...@@ -99,6 +96,8 @@ class MetisTuner(Tuner): ...@@ -99,6 +96,8 @@ class MetisTuner(Tuner):
self.minimize_constraints_fun = None self.minimize_constraints_fun = None
self.minimize_starting_points = None self.minimize_starting_points = None
self.supplement_data_num = 0 self.supplement_data_num = 0
self.x_bounds = []
self.x_types = []
def update_search_space(self, search_space): def update_search_space(self, search_space):
...@@ -144,7 +143,7 @@ class MetisTuner(Tuner): ...@@ -144,7 +143,7 @@ class MetisTuner(Tuner):
self.x_types[idx] = 'discrete_int' self.x_types[idx] = 'discrete_int'
else: else:
logger.info("Metis Tuner doesn't support this kind of variable: " + str(key_type)) logger.info("Metis Tuner doesn't support this kind of variable: %s", key_type)
raise RuntimeError("Metis Tuner doesn't support this kind of variable: " + str(key_type)) raise RuntimeError("Metis Tuner doesn't support this kind of variable: " + str(key_type))
else: else:
logger.info("The format of search space is not a dict.") logger.info("The format of search space is not a dict.")
...@@ -198,7 +197,7 @@ class MetisTuner(Tuner): ...@@ -198,7 +197,7 @@ class MetisTuner(Tuner):
minimize_starting_points=self.minimize_starting_points, minimize_starting_points=self.minimize_starting_points,
minimize_constraints_fun=self.minimize_constraints_fun) minimize_constraints_fun=self.minimize_constraints_fun)
logger.info("Generate paramageters:\n" + str(results)) logger.info("Generate paramageters:\n%s", results)
return results return results
...@@ -217,8 +216,8 @@ class MetisTuner(Tuner): ...@@ -217,8 +216,8 @@ class MetisTuner(Tuner):
value = -value value = -value
logger.info("Received trial result.") logger.info("Received trial result.")
logger.info("value is :" + str(value)) logger.info("value is :%s", value)
logger.info("parameter is : " + str(parameters)) logger.info("parameter is : %s", parameters)
# parse parameter to sample_x # parse parameter to sample_x
sample_x = [0 for i in range(len(self.key_order))] sample_x = [0 for i in range(len(self.key_order))]
...@@ -271,10 +270,12 @@ class MetisTuner(Tuner): ...@@ -271,10 +270,12 @@ class MetisTuner(Tuner):
minimize_constraints_fun=minimize_constraints_fun) minimize_constraints_fun=minimize_constraints_fun)
if not lm_current: if not lm_current:
return None return None
logger.info({'hyperparameter': lm_current['hyperparameter'], logger.info({
'hyperparameter': lm_current['hyperparameter'],
'expected_mu': lm_current['expected_mu'], 'expected_mu': lm_current['expected_mu'],
'expected_sigma': lm_current['expected_sigma'], 'expected_sigma': lm_current['expected_sigma'],
'reason': "exploitation_gp"}) 'reason': "exploitation_gp"
})
if no_candidates is False: if no_candidates is False:
# ===== STEP 2: Get recommended configurations for exploration ===== # ===== STEP 2: Get recommended configurations for exploration =====
...@@ -289,10 +290,12 @@ class MetisTuner(Tuner): ...@@ -289,10 +290,12 @@ class MetisTuner(Tuner):
if results_exploration is not None: if results_exploration is not None:
if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0: if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
temp_candidate = {'hyperparameter': results_exploration['hyperparameter'], temp_candidate = {
'hyperparameter': results_exploration['hyperparameter'],
'expected_mu': results_exploration['expected_mu'], 'expected_mu': results_exploration['expected_mu'],
'expected_sigma': results_exploration['expected_sigma'], 'expected_sigma': results_exploration['expected_sigma'],
'reason': "exploration"} 'reason': "exploration"
}
candidates.append(temp_candidate) candidates.append(temp_candidate)
logger.info("DEBUG: 1 exploration candidate selected\n") logger.info("DEBUG: 1 exploration candidate selected\n")
...@@ -322,11 +325,14 @@ class MetisTuner(Tuner): ...@@ -322,11 +325,14 @@ class MetisTuner(Tuner):
if results_exploitation is not None: if results_exploitation is not None:
if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0: if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
temp_expected_mu, temp_expected_sigma = gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model']) temp_expected_mu, temp_expected_sigma = \
temp_candidate = {'hyperparameter': results_exploitation['hyperparameter'], gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model'])
temp_candidate = {
'hyperparameter': results_exploitation['hyperparameter'],
'expected_mu': temp_expected_mu, 'expected_mu': temp_expected_mu,
'expected_sigma': temp_expected_sigma, 'expected_sigma': temp_expected_sigma,
'reason': "exploitation_gmm"} 'reason': "exploitation_gmm"
}
candidates.append(temp_candidate) candidates.append(temp_candidate)
logger.info("DEBUG: 1 exploitation_gmm candidate selected\n") logger.info("DEBUG: 1 exploitation_gmm candidate selected\n")
...@@ -349,7 +355,7 @@ class MetisTuner(Tuner): ...@@ -349,7 +355,7 @@ class MetisTuner(Tuner):
results_outliers = gp_outlier_detection.outlierDetection_threaded(samples_x, samples_y_aggregation) results_outliers = gp_outlier_detection.outlierDetection_threaded(samples_x, samples_y_aggregation)
if results_outliers is not None: if results_outliers is not None:
for results_outlier in results_outliers: for results_outlier in results_outliers: # pylint: disable=not-an-iterable
if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x: if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
temp_candidate = {'hyperparameter': samples_x[results_outlier['samples_idx']],\ temp_candidate = {'hyperparameter': samples_x[results_outlier['samples_idx']],\
'expected_mu': results_outlier['expected_mu'],\ 'expected_mu': results_outlier['expected_mu'],\
...@@ -398,10 +404,12 @@ class MetisTuner(Tuner): ...@@ -398,10 +404,12 @@ class MetisTuner(Tuner):
next_candidate = {'hyperparameter': next_candidate, 'reason': "random", next_candidate = {'hyperparameter': next_candidate, 'reason': "random",
'expected_mu': expected_mu, 'expected_sigma': expected_sigma} 'expected_mu': expected_mu, 'expected_sigma': expected_sigma}
# ===== STEP 7: If current optimal hyperparameter occurs in the history or exploration probability is less than the threshold, take next config as exploration step ===== # ===== STEP 7 =====
# If current optimal hyperparameter occurs in the history or exploration probability is less than the threshold,
# take next config as exploration step
outputs = self._pack_output(lm_current['hyperparameter']) outputs = self._pack_output(lm_current['hyperparameter'])
ap = random.uniform(0, 1) ap = random.uniform(0, 1)
if outputs in self.total_data or ap<=self.exploration_probability: if outputs in self.total_data or ap <= self.exploration_probability:
if next_candidate is not None: if next_candidate is not None:
outputs = self._pack_output(next_candidate['hyperparameter']) outputs = self._pack_output(next_candidate['hyperparameter'])
else: else:
...@@ -419,14 +427,14 @@ class MetisTuner(Tuner): ...@@ -419,14 +427,14 @@ class MetisTuner(Tuner):
""" """
_completed_num = 0 _completed_num = 0
for trial_info in data: for trial_info in data:
logger.info("Importing data, current processing progress %s / %s" %(_completed_num, len(data))) logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
_completed_num += 1 _completed_num += 1
assert "parameter" in trial_info assert "parameter" in trial_info
_params = trial_info["parameter"] _params = trial_info["parameter"]
assert "value" in trial_info assert "value" in trial_info
_value = trial_info['value'] _value = trial_info['value']
if not _value: if not _value:
logger.info("Useless trial data, value is %s, skip this trial data." %_value) logger.info("Useless trial data, value is %s, skip this trial data.", _value)
continue continue
self.supplement_data_num += 1 self.supplement_data_num += 1
_parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)]) _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
......
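Adding `self.x_bounds = []` and `self.x_types = []` to `__init__` addresses `attribute-defined-outside-init` (W0201): every attribute the instance will ever carry is declared in the constructor, even if a later method overwrites it. A minimal sketch of the shape (names mirror the diff; the class body is hypothetical):

```python
class MetisTunerSketch:
    def __init__(self):
        # Declare all instance state up front so W0201 is satisfied and
        # readers see the full attribute set in one place.
        self.x_bounds = []
        self.x_types = []

    def update_search_space(self, bounds, types):
        self.x_bounds = bounds  # overwriting is fine; defining here alone is not
        self.x_types = types
```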
...@@ -61,7 +61,7 @@ _customized_parameter_ids = set() ...@@ -61,7 +61,7 @@ _customized_parameter_ids = set()
def _create_parameter_id(): def _create_parameter_id():
global _next_parameter_id # pylint: disable=global-statement global _next_parameter_id
_next_parameter_id += 1 _next_parameter_id += 1
return _next_parameter_id - 1 return _next_parameter_id - 1
...@@ -106,15 +106,15 @@ class MsgDispatcher(MsgDispatcherBase): ...@@ -106,15 +106,15 @@ class MsgDispatcher(MsgDispatcherBase):
self.tuner.update_search_space(data) self.tuner.update_search_space(data)
send(CommandType.Initialized, '') send(CommandType.Initialized, '')
def send_trial_callback(self, id, params): def send_trial_callback(self, id_, params):
"""For tuner to issue trial config when the config is generated """For tuner to issue trial config when the config is generated
""" """
send(CommandType.NewTrialJob, _pack_parameter(id, params)) send(CommandType.NewTrialJob, _pack_parameter(id_, params))
def handle_request_trial_jobs(self, data): def handle_request_trial_jobs(self, data):
# data: number or trial jobs # data: number or trial jobs
ids = [_create_parameter_id() for _ in range(data)] ids = [_create_parameter_id() for _ in range(data)]
_logger.debug("requesting for generating params of {}".format(ids)) _logger.debug("requesting for generating params of %s", ids)
params_list = self.tuner.generate_multiple_parameters(ids, st_callback=self.send_trial_callback) params_list = self.tuner.generate_multiple_parameters(ids, st_callback=self.send_trial_callback)
for i, _ in enumerate(params_list): for i, _ in enumerate(params_list):
...@@ -218,7 +218,8 @@ class MsgDispatcher(MsgDispatcherBase): ...@@ -218,7 +218,8 @@ class MsgDispatcher(MsgDispatcherBase):
try: try:
result = self.assessor.assess_trial(trial_job_id, ordered_history) result = self.assessor.assess_trial(trial_job_id, ordered_history)
except Exception as e: except Exception as e:
_logger.exception('Assessor error') _logger.error('Assessor error')
_logger.exception(e)
if isinstance(result, bool): if isinstance(result, bool):
result = AssessResult.Good if result else AssessResult.Bad result = AssessResult.Good if result else AssessResult.Bad
......
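On the assessor error path, note that `logger.exception(...)` already logs at ERROR level and attaches the active traceback when called inside an `except` block, so the split into `error` plus `exception(e)` mainly makes the message and the traceback two separate records:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

try:
    1 / 0
except ZeroDivisionError as e:
    logger.error('Assessor error')  # message only
    logger.exception(e)             # str(e) plus the current traceback
```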
...@@ -18,7 +18,6 @@ ...@@ -18,7 +18,6 @@
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ================================================================================================== # ==================================================================================================
import os
import threading import threading
import logging import logging
from multiprocessing.dummy import Pool as ThreadPool from multiprocessing.dummy import Pool as ThreadPool
...@@ -146,7 +145,7 @@ class MsgDispatcherBase(Recoverable): ...@@ -146,7 +145,7 @@ class MsgDispatcherBase(Recoverable):
pass pass
def process_command(self, command, data): def process_command(self, command, data):
_logger.debug('process_command: command: [{}], data: [{}]'.format(command, data)) _logger.debug('process_command: command: [%s], data: [%s]', command, data)
command_handlers = { command_handlers = {
# Tuner commands: # Tuner commands:
......
...@@ -25,6 +25,11 @@ from . import trial ...@@ -25,6 +25,11 @@ from . import trial
_logger = logging.getLogger(__name__) _logger = logging.getLogger(__name__)
_MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer" _MUTABLE_LAYER_SPACE_PREFIX = "_mutable_layer"
_namespace = {}
_tf_variables = {}
_arch_logits_list = []
_optimizer = None
_train_op = None
def classic_mode( def classic_mode(
...@@ -64,47 +69,40 @@ def enas_mode( ...@@ -64,47 +69,40 @@ def enas_mode(
it can be known which inputs should be masked and which op should be executed.''' it can be known which inputs should be masked and which op should be executed.'''
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id) name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# store namespace # store namespace
if 'name_space' not in globals(): _namespace[mutable_id] = True
global name_space _namespace[name_prefix] = dict()
name_space = dict() _namespace[name_prefix]['funcs'] = list(funcs)
name_space[mutable_id] = True _namespace[name_prefix]['optional_inputs'] = list(optional_inputs)
name_space[name_prefix] = dict()
name_space[name_prefix]['funcs'] = list(funcs)
name_space[name_prefix]['optional_inputs'] = list(optional_inputs)
# create tensorflow variables as 1/0 signals used to form subgraph # create tensorflow variables as 1/0 signals used to form subgraph
if 'tf_variables' not in globals():
global tf_variables
tf_variables = dict()
name_for_optional_inputs = name_prefix + '_optional_inputs' name_for_optional_inputs = name_prefix + '_optional_inputs'
name_for_funcs = name_prefix + '_funcs' name_for_funcs = name_prefix + '_funcs'
tf_variables[name_prefix] = dict() _tf_variables[name_prefix] = dict()
tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(name_for_optional_inputs, _tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
[len( name_for_optional_inputs,
optional_inputs)], [len(optional_inputs)],
dtype=tf.bool, dtype=tf.bool,
trainable=False) trainable=False
tf_variables[name_prefix]['funcs'] = tf.get_variable( )
_tf_variables[name_prefix]['funcs'] = tf.get_variable(
name_for_funcs, [], dtype=tf.int64, trainable=False) name_for_funcs, [], dtype=tf.int64, trainable=False)
# get real values using their variable names # get real values using their variable names
real_optional_inputs_value = [optional_inputs[name] real_optional_inputs_value = [optional_inputs[name]
for name in name_space[name_prefix]['optional_inputs']] for name in _namespace[name_prefix]['optional_inputs']]
real_func_value = [funcs[name] real_func_value = [funcs[name]
for name in name_space[name_prefix]['funcs']] for name in _namespace[name_prefix]['funcs']]
real_funcs_args = [funcs_args[name] real_funcs_args = [funcs_args[name]
for name in name_space[name_prefix]['funcs']] for name in _namespace[name_prefix]['funcs']]
# build tensorflow graph of getting chosen inputs by masking # build tensorflow graph of getting chosen inputs by masking
real_chosen_inputs = tf.boolean_mask( real_chosen_inputs = tf.boolean_mask(
real_optional_inputs_value, tf_variables[name_prefix]['optional_inputs']) real_optional_inputs_value, _tf_variables[name_prefix]['optional_inputs'])
# build tensorflow graph of different branches by using tf.case # build tensorflow graph of different branches by using tf.case
branches = dict() branches = dict()
func_output = None
for func_id in range(len(funcs)): for func_id in range(len(funcs)):
func_output = real_func_value[func_id]( func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
[fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id]) branches[tf.equal(_tf_variables[name_prefix]['funcs'], func_id)] = lambda: func_output
branches[tf.equal(tf_variables[name_prefix]['funcs'], layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)
func_id)] = lambda: func_output
layer_out = tf.case(branches, exclusive=True,
default=lambda: func_output)
return layer_out return layer_out
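The ENAS/DARTS rewrite above replaces on-demand `globals()` injection (`if 'name_space' not in globals(): global name_space ...`) with module-level registries declared at import time (`_namespace`, `_tf_variables`, `_arch_logits_list`, `_optimizer`, `_train_op`). Pylint can then resolve every name, and `global` is only needed where a name is rebound, not merely mutated:

```python
# Module-level state declared up front, as the diff does.
_namespace = {}
_optimizer = None

def register(prefix, funcs):
    # Mutating a module-level dict needs no `global` statement.
    _namespace[prefix] = {"funcs": list(funcs)}

def ensure_optimizer(factory):
    # Rebinding a module-level name does require `global`.
    global _optimizer
    if _optimizer is None:
        _optimizer = factory()
    return _optimizer
```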
...@@ -157,12 +155,9 @@ def darts_mode( ...@@ -157,12 +155,9 @@ def darts_mode(
layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name]) layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
for func_name, func in funcs.items()] for func_name, func in funcs.items()]
# Create architecture weights for every func(op) # Create architecture weights for every func(op)
var_name = "{}_{}_".format(mutable_id, mutable_layer_id, "arch_weights") var_name = "{}_{}_arch_weights".format(mutable_id, mutable_layer_id)
if 'arch_logits_list' not in globals(): arch_logits = tf.get_variable(var_name, shape=[len(funcs)], trainable=False)
global arch_logits_list _arch_logits_list.append(arch_logits)
arch_logits_list = list()
arch_logits = tf.get_variable(var_name, shape=[len[funcs]], trainable=False)
arch_logits_list.append(arch_logits)
arch_weights = tf.nn.softmax(arch_logits) arch_weights = tf.nn.softmax(arch_logits)
layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)]) layer_out = tf.add_n([arch_weights[idx] * out for idx, out in enumerate(layer_outs)])
...@@ -186,19 +181,19 @@ def reload_tensorflow_variables(tf, session): ...@@ -186,19 +181,19 @@ def reload_tensorflow_variables(tf, session):
mutable_layers.add((mutable_id, mutable_layer_id)) mutable_layers.add((mutable_id, mutable_layer_id))
mutable_layers = sorted(list(mutable_layers)) mutable_layers = sorted(list(mutable_layers))
for mutable_id, mutable_layer_id in mutable_layers: for mutable_id, mutable_layer_id in mutable_layers:
if mutable_id not in name_space: if mutable_id not in _namespace:
_logger.warning("{} not found in name space".format(mutable_id)) _logger.warning("%s not found in name space", mutable_id)
continue continue
name_prefix = "{}_{}".format(mutable_id, mutable_layer_id) name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
# get optional inputs names # get optional inputs names
optional_inputs = name_space[name_prefix]['optional_inputs'] optional_inputs = _namespace[name_prefix]['optional_inputs']
# extract layer information from the subgraph sampled by tuner # extract layer information from the subgraph sampled by tuner
chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs) chosen_layer, chosen_inputs = _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inputs)
chosen_layer = name_space[name_prefix]['funcs'].index(chosen_layer) chosen_layer = _namespace[name_prefix]['funcs'].index(chosen_layer)
chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs] chosen_inputs = [1 if inp in chosen_inputs else 0 for inp in optional_inputs]
# load these information into pre-defined tensorflow variables # load these information into pre-defined tensorflow variables
tf_variables[name_prefix]['funcs'].load(chosen_layer, session) _tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
tf_variables[name_prefix]['optional_inputs'].load( _tf_variables[name_prefix]['optional_inputs'].load(
chosen_inputs, session) chosen_inputs, session)
...@@ -218,15 +213,13 @@ def _decompose_general_key(key): ...@@ -218,15 +213,13 @@ def _decompose_general_key(key):
def darts_training(tf, session, loss, feed_dict): def darts_training(tf, session, loss, feed_dict):
if 'optimizer' not in globals(): global _optimizer, _train_op
global arch_logits_list if _optimizer is None:
global optimizer _optimizer = tf.MomentumOptimizer(learning_rate=0.025)
global train_op
optimizer = tf.MomentumOptimizer(learning_rate=0.025)
# TODO: Calculate loss # TODO: Calculate loss
grads_and_vars = optimizer.compute_gradients(loss, arch_logits_list) grads_and_vars = _optimizer.compute_gradients(loss, _arch_logits_list)
train_op = optimizer.apply_gradients(grads_and_vars) _train_op = _optimizer.apply_gradients(grads_and_vars)
session.run(train_op) session.run(_train_op)
def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None): def training_update(nas_mode, tf=None, session=None, loss=None, feed_dict=None):
...@@ -258,12 +251,11 @@ def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inpu ...@@ -258,12 +251,11 @@ def _get_layer_and_inputs_from_tuner(mutable_id, mutable_layer_id, optional_inpu
chosen_inputs = [] chosen_inputs = []
# make sure dict -> list produce stable result by sorting # make sure dict -> list produce stable result by sorting
optional_inputs_keys = sorted(optional_inputs) optional_inputs_keys = sorted(optional_inputs)
for i in range(optional_input_size): for _ in range(optional_input_size):
chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)]) chosen_inputs.append(optional_inputs_keys[optional_input_state % len(optional_inputs)])
optional_input_state //= len(optional_inputs) optional_input_state //= len(optional_inputs)
_logger.info("%s_%s: layer: %s, optional inputs: %s" % (mutable_id, mutable_layer_id, _logger.info("%s_%s: layer: %s, optional inputs: %s", mutable_id, mutable_layer_id, chosen_layer, chosen_inputs)
chosen_layer, chosen_inputs))
return chosen_layer, chosen_inputs return chosen_layer, chosen_inputs
...@@ -278,12 +270,12 @@ def convert_nas_search_space(search_space): ...@@ -278,12 +270,12 @@ def convert_nas_search_space(search_space):
if "_type" not in v: if "_type" not in v:
# this should not happen # this should not happen
_logger.warning("There is no _type in one of your search space values with key '%s'" _logger.warning("There is no _type in one of your search space values with key '%s'"
". Please check your search space" % k) ". Please check your search space", k)
ret[k] = v ret[k] = v
elif v["_type"] != "mutable_layer": elif v["_type"] != "mutable_layer":
ret[k] = v ret[k] = v
else: else:
_logger.info("Converting mutable_layer search space with key '%s'" % k) _logger.info("Converting mutable_layer search space with key '%s'", k)
# v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...} # v["_value"] looks like {'mutable_layer_1': {'layer_choice': ...} ...}
values = v["_value"] values = v["_value"]
for layer_name, layer_data in values.items(): for layer_name, layer_data in values.items():
...@@ -305,13 +297,13 @@ def convert_nas_search_space(search_space): ...@@ -305,13 +297,13 @@ def convert_nas_search_space(search_space):
_logger.error("Might not be able to handle optional_input_size < 0, please double check") _logger.error("Might not be able to handle optional_input_size < 0, please double check")
input_size[1] += 1 input_size[1] += 1
else: else:
_logger.info("Optional input choices are set to empty by default in %s" % layer_key) _logger.info("Optional input choices are set to empty by default in %s", layer_key)
input_size = [0, 1] input_size = [0, 1]
if layer_data.get("optional_inputs"): if layer_data.get("optional_inputs"):
total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1) total_state_size = len(layer_data["optional_inputs"]) ** (input_size[1] - 1)
else: else:
_logger.info("Optional inputs not found in %s" % layer_key) _logger.info("Optional inputs not found in %s", layer_key)
total_state_size = 1 total_state_size = 1
converted = { converted = {
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
# ================================================================================================== # ==================================================================================================
import json import json
from collections import Iterable from collections.abc import Iterable
from copy import deepcopy, copy from copy import deepcopy, copy
from queue import Queue from queue import Queue
...@@ -653,7 +653,7 @@ class Graph: ...@@ -653,7 +653,7 @@ class Graph:
return JSONModel(self).data return JSONModel(self).data
@classmethod @classmethod
def parsing_json_model(self, json_model): def parsing_json_model(cls, json_model):
'''build a graph from json '''build a graph from json
''' '''
return json_to_graph(json_model) return json_to_graph(json_model)
...@@ -910,7 +910,6 @@ def graph_to_onnx(graph, onnx_model_path): ...@@ -910,7 +910,6 @@ def graph_to_onnx(graph, onnx_model_path):
def onnx_to_graph(onnx_model, input_shape): def onnx_to_graph(onnx_model, input_shape):
import onnx
# to do in the future using onnx ir # to do in the future using onnx ir
graph = Graph(input_shape, False) graph = Graph(input_shape, False)
graph.parsing_onnx_model(onnx_model) graph.parsing_onnx_model(onnx_model)
......
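`from collections import Iterable` relied on aliases that were deprecated when the ABCs moved to `collections.abc` in Python 3.3 and removed outright in Python 3.10, so the new import is the only future-proof one:

```python
from collections.abc import Iterable  # the ABCs' home since Python 3.3

assert isinstance([1, 2, 3], Iterable)
assert not isinstance(42, Iterable)
```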
...@@ -124,7 +124,7 @@ def wider_pre_conv(layer, n_add_filters, weighted=True): ...@@ -124,7 +124,7 @@ def wider_pre_conv(layer, n_add_filters, weighted=True):
student_w = teacher_w.copy() student_w = teacher_w.copy()
student_b = teacher_b.copy() student_b = teacher_b.copy()
# target layer update (i) # target layer update (i)
for i in range(len(rand)): for i, _ in enumerate(rand):
teacher_index = rand[i] teacher_index = rand[i]
new_weight = teacher_w[teacher_index, ...] new_weight = teacher_w[teacher_index, ...]
new_weight = new_weight[np.newaxis, ...] new_weight = new_weight[np.newaxis, ...]
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
# ================================================================================================== # ==================================================================================================
from abc import abstractmethod from abc import abstractmethod
from collections import Iterable from collections.abc import Iterable
import torch import torch
from torch import nn from torch import nn
...@@ -76,7 +76,6 @@ class StubLayer: ...@@ -76,7 +76,6 @@ class StubLayer:
def build(self, shape): def build(self, shape):
'''build shape. '''build shape.
''' '''
pass
def set_weights(self, weights): def set_weights(self, weights):
'''set weights. '''set weights.
...@@ -86,22 +85,18 @@ class StubLayer: ...@@ -86,22 +85,18 @@ class StubLayer:
def import_weights(self, torch_layer): def import_weights(self, torch_layer):
'''import weights. '''import weights.
''' '''
pass
def import_weights_keras(self, keras_layer): def import_weights_keras(self, keras_layer):
'''import weights from keras layer. '''import weights from keras layer.
''' '''
pass
def export_weights(self, torch_layer): def export_weights(self, torch_layer):
'''export weights. '''export weights.
''' '''
pass
def export_weights_keras(self, keras_layer): def export_weights_keras(self, keras_layer):
'''export weights to keras layer. '''export weights to keras layer.
''' '''
pass
def get_weights(self): def get_weights(self):
'''get weights. '''get weights.
...@@ -122,7 +117,6 @@ class StubLayer: ...@@ -122,7 +117,6 @@ class StubLayer:
def to_real_layer(self): def to_real_layer(self):
'''to real layer. '''to real layer.
''' '''
pass
def __str__(self): def __str__(self):
'''str() function to print. '''str() function to print.
...@@ -576,6 +570,7 @@ def to_real_keras_layer(layer): ...@@ -576,6 +570,7 @@ def to_real_keras_layer(layer):
return layers.Flatten() return layers.Flatten()
if is_layer(layer, "GlobalAveragePooling"): if is_layer(layer, "GlobalAveragePooling"):
return layers.GlobalAveragePooling2D() return layers.GlobalAveragePooling2D()
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
def is_layer(layer, layer_type): def is_layer(layer, layer_type):
...@@ -608,6 +603,7 @@ def is_layer(layer, layer_type): ...@@ -608,6 +603,7 @@ def is_layer(layer, layer_type):
return isinstance(layer, (StubFlatten,)) return isinstance(layer, (StubFlatten,))
elif layer_type == "GlobalAveragePooling": elif layer_type == "GlobalAveragePooling":
return isinstance(layer, StubGlobalPooling) return isinstance(layer, StubGlobalPooling)
return None # note: this is not written by original author, feel free to modify if you think it's incorrect
def layer_description_extractor(layer, node_to_id): def layer_description_extractor(layer, node_to_id):
...@@ -664,7 +660,6 @@ def layer_description_extractor(layer, node_to_id): ...@@ -664,7 +660,6 @@ def layer_description_extractor(layer, node_to_id):
def layer_description_builder(layer_information, id_to_node): def layer_description_builder(layer_information, id_to_node):
'''build layer from description. '''build layer from description.
''' '''
# pylint: disable=W0123
layer_type = layer_information[0] layer_type = layer_information[0]
layer_input_ids = layer_information[1] layer_input_ids = layer_information[1]
...@@ -678,26 +673,26 @@ def layer_description_builder(layer_information, id_to_node): ...@@ -678,26 +673,26 @@ def layer_description_builder(layer_information, id_to_node):
filters = layer_information[4] filters = layer_information[4]
kernel_size = layer_information[5] kernel_size = layer_information[5]
stride = layer_information[6] stride = layer_information[6]
return eval(layer_type)( return globals()[layer_type](
input_channel, filters, kernel_size, stride, layer_input, layer_output input_channel, filters, kernel_size, stride, layer_input, layer_output
) )
elif layer_type.startswith("StubDense"): elif layer_type.startswith("StubDense"):
input_units = layer_information[3] input_units = layer_information[3]
units = layer_information[4] units = layer_information[4]
return eval(layer_type)(input_units, units, layer_input, layer_output) return globals()[layer_type](input_units, units, layer_input, layer_output)
elif layer_type.startswith("StubBatchNormalization"): elif layer_type.startswith("StubBatchNormalization"):
num_features = layer_information[3] num_features = layer_information[3]
return eval(layer_type)(num_features, layer_input, layer_output) return globals()[layer_type](num_features, layer_input, layer_output)
elif layer_type.startswith("StubDropout"): elif layer_type.startswith("StubDropout"):
rate = layer_information[3] rate = layer_information[3]
return eval(layer_type)(rate, layer_input, layer_output) return globals()[layer_type](rate, layer_input, layer_output)
elif layer_type.startswith("StubPooling"): elif layer_type.startswith("StubPooling"):
kernel_size = layer_information[3] kernel_size = layer_information[3]
stride = layer_information[4] stride = layer_information[4]
padding = layer_information[5] padding = layer_information[5]
return eval(layer_type)(kernel_size, stride, padding, layer_input, layer_output) return globals()[layer_type](kernel_size, stride, padding, layer_input, layer_output)
else: else:
return eval(layer_type)(layer_input, layer_output) return globals()[layer_type](layer_input, layer_output)
def layer_width(layer): def layer_width(layer):
......
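`layer_description_builder` now resolves stub classes with `globals()[layer_type]` instead of `eval(layer_type)`, the same eval-removal pattern as in the tuners: the name is looked up in the module namespace, and anything that is not a module-level name raises `KeyError` instead of being evaluated. A stand-in sketch:

```python
class StubDense:  # stand-in for the real stub layer classes
    def __init__(self, input_units, units):
        self.input_units, self.units = input_units, units

def build_layer(layer_type, *args):
    # globals() maps names to module-level objects; no expression is evaluated.
    return globals()[layer_type](*args)

layer = build_layer("StubDense", 64, 128)
assert layer.units == 128
```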
...@@ -310,4 +310,3 @@ class NetworkMorphismTuner(Tuner): ...@@ -310,4 +310,3 @@ class NetworkMorphismTuner(Tuner):
def import_data(self, data): def import_data(self, data):
pass pass
...@@ -19,8 +19,6 @@ ...@@ -19,8 +19,6 @@
# ================================================================================================== # ==================================================================================================
# pylint: disable=wildcard-import
from ..env_vars import trial_env_vars from ..env_vars import trial_env_vars
if trial_env_vars.NNI_PLATFORM is None: if trial_env_vars.NNI_PLATFORM is None:
......
...@@ -58,7 +58,7 @@ def request_next_parameter(): ...@@ -58,7 +58,7 @@ def request_next_parameter():
def get_next_parameter(): def get_next_parameter():
global _param_index global _param_index
params_file_name = '' params_file_name = ''
if _multiphase and (_multiphase == 'true' or _multiphase == 'True'): if _multiphase in ('true', 'True'):
params_file_name = ('parameter_{}.cfg'.format(_param_index), 'parameter.cfg')[_param_index == 0] params_file_name = ('parameter_{}.cfg'.format(_param_index), 'parameter.cfg')[_param_index == 0]
else: else:
if _param_index > 0: if _param_index > 0:
...@@ -92,7 +92,7 @@ def send_metric(string): ...@@ -92,7 +92,7 @@ def send_metric(string):
file = open(_metric_file.name) file = open(_metric_file.name)
file.close() file.close()
else: else:
subprocess.run(['touch', _metric_file.name], check = True) subprocess.run(['touch', _metric_file.name], check=True)
def get_experiment_id(): def get_experiment_id():
return trial_env_vars.NNI_EXP_ID return trial_env_vars.NNI_EXP_ID
......
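Finally, the trial-side change condenses `_multiphase and (_multiphase == 'true' or _multiphase == 'True')` into a membership test, pylint's `consider-using-in` suggestion; an unset (`None` or empty) `_multiphase` still falls through to the `else` branch:

```python
_multiphase = 'True'
if _multiphase in ('true', 'True'):  # replaces the chained == comparisons
    print('multi-phase enabled')
```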