Commit 594924a9 authored by quzha

Merge branch 'master' of github.com:Microsoft/nni into dev-nas-refactor

parents d43fbe82 262fabf1
......@@ -27,21 +27,21 @@ class MedianstopAssessor(Assessor):
Parameters
----------
optimize_mode: str
optimize_mode : str
optimize mode, 'maximize' or 'minimize'
start_step: int
start_step : int
a trial is assessed only after receiving start_step number of reported intermediate results
"""
def __init__(self, optimize_mode='maximize', start_step=0):
self.start_step = start_step
self.running_history = dict()
self.completed_avg_history = dict()
self._start_step = start_step
self._running_history = dict()
self._completed_avg_history = dict()
if optimize_mode == 'maximize':
self.high_better = True
self._high_better = True
elif optimize_mode == 'minimize':
self.high_better = False
self._high_better = False
else:
self.high_better = True
self._high_better = True
logger.warning('unrecognized optimize_mode %s', optimize_mode)
def _update_data(self, trial_job_id, trial_history):
......@@ -49,35 +49,35 @@ class MedianstopAssessor(Assessor):
Parameters
----------
trial_job_id: int
trial_job_id : int
trial job id
trial_history: list
trial_history : list
The history performance matrix of each trial
"""
if trial_job_id not in self.running_history:
self.running_history[trial_job_id] = []
self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
if trial_job_id not in self._running_history:
self._running_history[trial_job_id] = []
self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])
def trial_end(self, trial_job_id, success):
"""trial_end
Parameters
----------
trial_job_id: int
trial_job_id : int
trial job id
success: bool
success : bool
True if the trial successfully finished, False otherwise
"""
if trial_job_id in self.running_history:
if trial_job_id in self._running_history:
if success:
cnt = 0
history_sum = 0
self.completed_avg_history[trial_job_id] = []
for each in self.running_history[trial_job_id]:
self._completed_avg_history[trial_job_id] = []
for each in self._running_history[trial_job_id]:
cnt += 1
history_sum += each
self.completed_avg_history[trial_job_id].append(history_sum / cnt)
self.running_history.pop(trial_job_id)
self._completed_avg_history[trial_job_id].append(history_sum / cnt)
self._running_history.pop(trial_job_id)
else:
logger.warning('trial_end: trial_job_id does not exist in running_history')
......@@ -86,9 +86,9 @@ class MedianstopAssessor(Assessor):
Parameters
----------
trial_job_id: int
trial_job_id : int
trial job id
trial_history: list
trial_history : list
The history performance matrix of each trial
Returns
......@@ -102,7 +102,7 @@ class MedianstopAssessor(Assessor):
unrecognized exception in medianstop_assessor
"""
curr_step = len(trial_history)
if curr_step < self.start_step:
if curr_step < self._start_step:
return AssessResult.Good
try:
......@@ -115,18 +115,18 @@ class MedianstopAssessor(Assessor):
logger.exception(error)
self._update_data(trial_job_id, num_trial_history)
if self.high_better:
if self._high_better:
best_history = max(trial_history)
else:
best_history = min(trial_history)
avg_array = []
for id_ in self.completed_avg_history:
if len(self.completed_avg_history[id_]) >= curr_step:
avg_array.append(self.completed_avg_history[id_][curr_step - 1])
for id_ in self._completed_avg_history:
if len(self._completed_avg_history[id_]) >= curr_step:
avg_array.append(self._completed_avg_history[id_][curr_step - 1])
if avg_array:
avg_array.sort()
if self.high_better:
if self._high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
......
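The decision rule implemented above can be restated as a small standalone sketch (illustrative only; ``completed_avg_history`` is the dict maintained by the class, the helper function itself is hypothetical, and minimize mode simply mirrors the comparison):

def median_stop_decision(trial_history, completed_avg_history):
    """Maximize-mode restatement of the median stop rule above."""
    curr_step = len(trial_history)
    best_history = max(trial_history)
    # running average at curr_step of every completed trial
    avg_array = sorted(
        avgs[curr_step - 1]
        for avgs in completed_avg_history.values()
        if len(avgs) >= curr_step
    )
    if not avg_array:
        return 'Good'
    median = avg_array[(len(avg_array) - 1) // 2]
    # stop (Bad) when the best result so far is below the median of completed trials
    return 'Bad' if best_history < median else 'Good'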
......@@ -77,7 +77,7 @@ class PdType:
class CategoricalPd(Pd):
"""
categorical prossibility distribution
Categorical probability distribution
"""
def __init__(self, logits, mask_npinf, nsteps, size, is_act_model):
self.logits = logits
......@@ -154,7 +154,7 @@ class CategoricalPd(Pd):
class CategoricalPdType(PdType):
"""
to create CategoricalPd
To create CategoricalPd
"""
def __init__(self, ncat, nsteps, np_mask, is_act_model):
self.ncat = ncat
......@@ -180,7 +180,7 @@ class CategoricalPdType(PdType):
def _matching_fc(tensor, name, size, nsteps, init_scale, init_bias, np_mask, is_act_model):
"""
add fc op, and add mask op when not in action mode
Add an fc op, and add a mask op when not in action mode
"""
if tensor.shape[-1] == size:
assert False
......
......@@ -40,9 +40,6 @@ class Model:
"""
def __init__(self, *, policy, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, microbatch_size=None, np_mask=None):
"""
init
"""
self.sess = sess = get_session()
with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
......@@ -137,9 +134,13 @@ class Model:
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
"""
train the model.
Train the model.
Here we calculate advantage A(s,a) = R + yV(s') - V(s),
where Returns = R + yV(s')
Returns
-------
obj
"""
advs = returns - values
......
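As a small numeric illustration of the advantage computed in train (made-up numbers, not part of the diff): with Returns = R + yV(s'), the advantage reduces to returns - values.

import numpy as np

# hypothetical rollout values; returns were computed as R + y*V(s') beforehand
returns = np.array([1.2, 0.8, 1.5], dtype=np.float32)
values = np.array([1.0, 1.0, 1.0], dtype=np.float32)  # V(s) from the value head
advs = returns - values                               # A(s,a) = R + y*V(s') - V(s)
print(advs)  # [ 0.2 -0.2  0.5]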
......@@ -34,14 +34,20 @@ class PolicyWithValue:
def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, np_mask=None, is_act_model=False, **tensors):
"""
Parameters:
Parameters
----------
env: RL environment
observations: tensorflow placeholder in which the observations will be fed
latent: latent state from which policy distribution parameters should be inferred
vf_latent: latent state from which value function should be inferred (if None, then latent is used)
sess: tensorflow session to run calculations in (if None, default session is used)
**tensors: tensorflow tensors for additional attributes such as state or mask
env : obj
RL environment
observations : tensorflow placeholder
Tensorflow placeholder in which the observations will be fed
latent : tensor
Latent state from which policy distribution parameters should be inferred
vf_latent : tensor
Latent state from which value function should be inferred (if None, then latent is used)
sess : tensorflow session
Tensorflow session to run calculations in (if None, default session is used)
**tensors
Tensorflow tensors for additional attributes such as state or mask
"""
self.X = observations
......@@ -138,12 +144,14 @@ class PolicyWithValue:
"""
Compute next action(s) given the observation(s)
Parameters:
Parameters
----------
observation: observation data (either single or a batch)
**extra_feed: additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
observation : np array
Observation data (either single or a batch)
**extra_feed
Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
Returns
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
......@@ -157,22 +165,40 @@ class PolicyWithValue:
"""
Compute value estimate(s) given the observation(s)
Parameters:
Parameters
----------
observation: observation data (either single or a batch)
**extra_feed: additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
observation : np array
Observation data (either single or a batch)
**extra_feed
Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
Returns
-------
value estimate
Value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs)
def build_lstm_policy(model_config, value_network=None, estimate_q=False, **policy_kwargs):
"""
build lstm policy and value network, they share the same lstm network.
Build lstm policy and value network; they share the same lstm network.
The parameters all use their default values.
Parameters
----------
model_config : obj
Configurations of the model
value_network : obj
The network for value function
estimate_q : bool
Whether to estimate ``q``
**policy_kwargs
The kwargs for policy network, i.e., lstm model
Returns
-------
func
The policy network
"""
policy_network = lstm_model(**policy_kwargs)
......
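A hedged usage sketch of the call pattern implied by the docstring above (``model_config`` is assumed to be the ModelConfig object the tuner builds elsewhere in this diff):

policy_fn = build_lstm_policy(model_config)  # returns the policy-building function
# Model (in model.py) later calls policy_fn to instantiate the act/train networks,
# both of which share the single lstm created here.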
......@@ -38,8 +38,10 @@ from .policy import build_lstm_policy
logger = logging.getLogger('ppo_tuner_AutoML')
def constfn(val):
"""wrap as function"""
def _constfn(val):
"""
Wrap a constant value as a function
"""
def f(_):
return val
return f
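A one-line usage example of this wrapper (illustrative):

lr_fn = _constfn(3e-4)
assert lr_fn(0.5) == 3e-4  # ignores its argument and always returns the constant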
......@@ -90,7 +92,7 @@ class TrialsInfo:
def get_next(self):
"""
get actions of the next trial
Get actions of the next trial
"""
if self.iter >= self.inf_batch_size:
return None, None
......@@ -102,14 +104,14 @@ class TrialsInfo:
def update_rewards(self, rewards, returns):
"""
after the trial is finished, reward and return of this trial is updated
After the trial is finished, the reward and return of this trial are updated
"""
self.rewards = rewards
self.returns = returns
def convert_shape(self):
"""
convert shape
Convert shape
"""
def sf01(arr):
"""
......@@ -138,9 +140,9 @@ class PPOModel:
set_global_seeds(None)
assert isinstance(self.model_config.lr, float)
self.lr = constfn(self.model_config.lr)
self.lr = _constfn(self.model_config.lr)
assert isinstance(self.model_config.cliprange, float)
self.cliprange = constfn(self.model_config.cliprange)
self.cliprange = _constfn(self.model_config.cliprange)
# build lstm policy network, value share the same network
policy = build_lstm_policy(model_config)
......@@ -165,12 +167,28 @@ class PPOModel:
def inference(self, num):
"""
generate actions along with related info from policy network.
Generate actions along with related info from policy network.
The observation is the action of the last step.
Parameters:
Parameters
----------
num: the number of trials to generate
num : int
The number of trials to generate
Returns
-------
mb_obs : list
Observation of the ``num`` configurations
mb_actions : list
Actions of the ``num`` configurations
mb_values : list
Values from the value function of the ``num`` configurations
mb_neglogpacs : list
``neglogp`` of the ``num`` configurations
mb_dones : list
Whether the play is done; always ``True``
last_values : tensorflow tensor
The last values of the ``num`` configurations, obtained with a session run
"""
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
......@@ -212,13 +230,15 @@ class PPOModel:
def compute_rewards(self, trials_info, trials_result):
"""
compute the rewards of the trials in trials_info based on trials_result,
Compute the rewards of the trials in trials_info based on trials_result,
and update the rewards in trials_info
Parameters:
Parameters
----------
trials_info: info of the generated trials
trials_result: final results (e.g., acc) of the generated trials
trials_info : TrialsInfo
Info of the generated trials
trials_result : list
Final results (e.g., acc) of the generated trials
"""
mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)
# discount/bootstrap off value fn
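The "# discount/bootstrap off value fn" step elided here is the standard generalized advantage estimation used by ppo2; a hedged numpy sketch of that recurrence, using the tuner's gamma and lam parameters (a restatement of the well-known formula, not the exact elided code):

import numpy as np

def discount_with_gae(rewards, values, last_values, dones, gamma=0.99, lam=0.95):
    """GAE: delta_t = r_t + gamma*V(t+1) - V(t); A_t = delta_t + gamma*lam*A_(t+1)."""
    nsteps = len(rewards)
    advs = np.zeros_like(rewards)
    lastgaelam = 0.0
    for t in reversed(range(nsteps)):
        nextvalue = last_values if t == nsteps - 1 else values[t + 1]
        nextnonterminal = 1.0 - dones[t]
        delta = rewards[t] + gamma * nextvalue * nextnonterminal - values[t]
        lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
        advs[t] = lastgaelam
    return advs + values  # returns = advantages + values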
......@@ -243,12 +263,14 @@ class PPOModel:
def train(self, trials_info, nenvs):
"""
train the policy/value network using trials_info
Train the policy/value network using trials_info
Parameters:
Parameters
----------
trials_info: complete info of the generated trials from the previous inference
nenvs: the batch size of the (previous) inference
trials_info : TrialsInfo
Complete info of the generated trials from the previous inference
nenvs : int
The batch size of the (previous) inference
"""
# keep frac decay for future optimization
if self.cur_update <= self.nupdates:
......@@ -282,27 +304,40 @@ class PPOModel:
class PPOTuner(Tuner):
"""
PPOTuner
PPOTuner. The implementation inherits the main logic of
[ppo2 from openai](https://github.com/openai/baselines/tree/master/baselines/ppo2), and is adapted for the NAS scenario.
It uses ``lstm`` for its policy network and value network; policy and value share the same network.
"""
def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
"""
initialization, PPO model is not initialized here as search space is not received yet.
Initialization. The PPO model is not initialized here because the search space is not received yet.
Parameters:
Parameters
----------
optimize_mode: maximize or minimize
trials_per_update: number of trials to have for each model update
epochs_per_update: number of epochs to run for each model update
minibatch_size: minibatch size (number of trials) for the update
ent_coef: policy entropy coefficient in the optimization objective
lr: learning rate of the model (lstm network), constant
vf_coef: value function loss coefficient in the optimization objective
max_grad_norm: gradient norm clipping coefficient
gamma: discounting factor
lam: advantage estimation discounting factor (lambda in the paper)
cliprange: cliprange in the PPO algorithm, constant
optimize_mode : str
'maximize' or 'minimize'
trials_per_update : int
Number of trials to have for each model update
epochs_per_update : int
Number of epochs to run for each model update
minibatch_size : int
Minibatch size (number of trials) for the update
ent_coef : float
Policy entropy coefficient in the optimization objective
lr : float
Learning rate of the model (lstm network), constant
vf_coef : float
Value function loss coefficient in the optimization objective
max_grad_norm : float
Gradient norm clipping coefficient
gamma : float
Discounting factor
lam : float
Advantage estimation discounting factor (lambda in the paper)
cliprange : float
Cliprange in the PPO algorithm, constant
"""
self.optimize_mode = OptimizeMode(optimize_mode)
self.model_config = ModelConfig()
......@@ -330,21 +365,25 @@ class PPOTuner(Tuner):
self.model_config.nminibatches = minibatch_size
self.send_trial_callback = None
logger.info('=== finished PPOTuner initialization')
logger.info('Finished PPOTuner initialization')
def _process_one_nas_space(self, block_name, block_space):
"""
process nas space to determine observation space and action space
Process nas space to determine observation space and action space
Parameters:
Parameters
----------
block_name: the name of the mutable block
block_space: search space of this mutable block
block_name : str
The name of the mutable block
block_space : dict
Search space of this mutable block
Returns:
----------
actions_spaces: list of the space of each action
actions_to_config: the mapping from action to generated configuration
Returns
-------
actions_spaces : list
List of the space of each action
actions_to_config : list
The mapping from action to generated configuration
"""
actions_spaces = []
actions_to_config = []
......@@ -385,7 +424,7 @@ class PPOTuner(Tuner):
def _process_nas_space(self, search_space):
"""
process nas search space to get action/observation space
Process nas search space to get action/observation space
"""
actions_spaces = []
actions_to_config = []
......@@ -412,7 +451,7 @@ class PPOTuner(Tuner):
def _generate_action_mask(self):
"""
different step could have different action space. to deal with this case, we merge all the
Different steps could have different action spaces. To deal with this case, we merge all the
possible actions into one action space, and use a mask to indicate available actions for each step
"""
two_masks = []
......@@ -439,15 +478,13 @@ class PPOTuner(Tuner):
def update_search_space(self, search_space):
"""
get search space, currently the space only includes that for NAS
Get the search space; currently the space only includes that for NAS
Parameters:
Parameters
----------
search_space: search space for NAS
Returns:
-------
no return
search_space : dict
Search space for NAS
For the format, refer to the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
"""
logger.info('=== update search space %s', search_space)
assert self.search_space is None
......@@ -470,7 +507,7 @@ class PPOTuner(Tuner):
def _actions_to_config(self, actions):
"""
given actions, to generate the corresponding trial configuration
Given actions, generate the corresponding trial configuration
"""
chosen_arch = copy.deepcopy(self.chosen_arch_template)
for cnt, act in enumerate(actions):
......@@ -490,6 +527,19 @@ class PPOTuner(Tuner):
def generate_multiple_parameters(self, parameter_id_list, **kwargs):
"""
Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
Parameters
----------
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
A list of newly generated configurations
"""
result = []
self.send_trial_callback = kwargs['st_callback']
......@@ -506,7 +556,17 @@ class PPOTuner(Tuner):
def generate_parameters(self, parameter_id, **kwargs):
"""
generate parameters, if no trial configration for now, self.credit plus 1 to send the config later
Generate parameters. If there is no trial configuration for now, ``self.credit`` is increased by 1 to send the configuration later.
Parameters
----------
parameter_id : int
Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
dict
One newly generated configuration
"""
if self.first_inf:
self.trials_result = [None for _ in range(self.inf_batch_size)]
......@@ -527,6 +587,7 @@ class PPOTuner(Tuner):
def _next_round_inference(self):
"""
Run an inference to generate the next batch of configurations
"""
self.finished_trials = 0
self.model.compute_rewards(self.trials_info, self.trials_result)
......@@ -554,8 +615,17 @@ class PPOTuner(Tuner):
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""
receive trial's result. if the number of finished trials equals self.inf_batch_size, start the next update to
train the model
Receive a trial's result. If the number of finished trials equals ``self.inf_batch_size``, start the next update to
train the model.
Parameters
----------
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
"""
trial_info_idx = self.running_trials.pop(parameter_id, None)
assert trial_info_idx is not None
......@@ -572,7 +642,17 @@ class PPOTuner(Tuner):
def trial_end(self, parameter_id, success, **kwargs):
"""
to deal with trial failure
To deal with trial failure. If a trial fails, it is popped out from ``self.running_trials``,
and the final result of this trial is assigned the average of the results of the finished trials.
Parameters
----------
parameter_id : int
Unique identifier for hyper-parameters used by this trial.
success : bool
True if the trial successfully completed; False if failed or terminated.
**kwargs
Not used
"""
if not success:
if parameter_id not in self.running_trials:
......@@ -582,7 +662,7 @@ class PPOTuner(Tuner):
assert trial_info_idx is not None
# use mean of finished trials as the result of this failed trial
values = [val for val in self.trials_result if val is not None]
logger.warning('zql values: %s', values)
logger.warning('In trial_end, values: %s', values)
self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
self.finished_trials += 1
if self.finished_trials == self.inf_batch_size:
......@@ -590,10 +670,11 @@ class PPOTuner(Tuner):
def import_data(self, data):
"""
Import additional data for tuning
Import additional data for tuning; not supported yet.
Parameters
----------
data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'
data : list
A list of dictionaries, each of which has at least two keys, ``parameter`` and ``value``
"""
logger.warning('PPOTuner cannot leverage imported data.')
......@@ -94,12 +94,14 @@ def lstm_model(nlstm=128, layer_norm=False):
An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
Parameters:
Parameters
----------
nlstm: int LSTM hidden state size
layer_norm: bool if True, layer-normalized version of LSTM is used
nlstm : int
LSTM hidden state size
layer_norm : bool
If True, a layer-normalized version of LSTM is used
Returns:
Returns
-------
function that builds LSTM with a given input tensor / placeholder
"""
......@@ -171,11 +173,15 @@ def adjust_shape(placeholder, data):
Adjust the shape of the data to the shape of the placeholder if possible.
If the shapes are incompatible, an AssertionError is thrown
Parameters:
placeholder: tensorflow input placeholder
data: input data to be (potentially) reshaped to be fed into placeholder
Parameters
----------
placeholder
Tensorflow input placeholder
data
Input data to be (potentially) reshaped to be fed into the placeholder
Returns:
Returns
-------
Reshaped data
"""
if not isinstance(data, np.ndarray) and not isinstance(data, list):
......@@ -230,13 +236,16 @@ def observation_placeholder(ob_space, batch_size=None, name='Ob'):
"""
Create a placeholder, of the size appropriate to the observation space, to feed observations into
Parameters:
Parameters
----------
ob_space: gym.Space observation space
batch_size: int size of the batch to be fed into input. Can be left None in most cases.
name: str name of the placeholder
Returns:
ob_space : gym.Space
Observation space
batch_size : int
Size of the batch to be fed into input. Can be left None in most cases.
name : str
Name of the placeholder
Returns
-------
tensorflow placeholder tensor
"""
......
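A hedged usage sketch of observation_placeholder together with adjust_shape (TF1-style API; gym is assumed to be available):

import gym
import numpy as np

ob_space = gym.spaces.Box(low=0.0, high=1.0, shape=(4,))
ob_ph = observation_placeholder(ob_space, batch_size=None, name='Ob')
# a single observation is reshaped to match the batched placeholder
single_ob = np.zeros(4, dtype=np.float32)
feed = adjust_shape(ob_ph, single_ob)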
......@@ -24,11 +24,14 @@ import numpy as np
def get_json_content(file_path):
"""Load json file content
"""
Load json file content
Parameters
----------
file_path : str
Path to the file
Raises
------
TypeError
......@@ -43,7 +46,8 @@ def get_json_content(file_path):
def generate_pcs(nni_search_space_content):
"""Generate the Parameter Configuration Space (PCS) which defines the
"""
Generate the Parameter Configuration Space (PCS) which defines the
legal ranges of the parameters to be optimized and their default values.
Generally, the format is:
# parameter_name categorical {value_1, ..., value_N} [default value]
......@@ -53,14 +57,17 @@ def generate_pcs(nni_search_space_content):
# parameter_name real [min_value, max_value] [default value]
# parameter_name real [min_value, max_value] [default value] log
Reference: https://automl.github.io/SMAC3/stable/options.html
Parameters
----------
nni_search_space_content : search_space
The search space of this experiment in NNI
Returns
-------
Parameter Configuration Space (PCS)
The legal ranges of the parameters to be optimized and their default values
Raises
------
RuntimeError
......@@ -122,7 +129,8 @@ def generate_pcs(nni_search_space_content):
def generate_scenario(ss_content):
"""Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
"""
Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
can be constructed either by providing an actual scenario-object, or by specifying the options in a scenario file.
Reference: https://automl.github.io/SMAC3/stable/options.html
The format of the scenario file is one option per line:
......@@ -191,6 +199,7 @@ def generate_scenario(ss_content):
wallclock_limit: int
Maximum amount of wallclock-time used for optimization. Default: inf.
Use default because this is controlled by nni
Returns
-------
Scenario:
......
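To make the PCS mapping concrete, a hedged example of how an NNI search space could translate into PCS lines per the format quoted above (bounds and defaults are illustrative):

# NNI search space (input):
search_space = {
    "optimizer": {"_type": "choice", "_value": ["sgd", "adam"]},
    "learning_rate": {"_type": "loguniform", "_value": [1e-5, 1e-1]},
}
# Plausible PCS output, following the format described above:
#   optimizer categorical {sgd, adam} [sgd]
#   learning_rate real [1e-05, 0.1] [0.001] log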
......@@ -40,14 +40,18 @@ from nni.utils import OptimizeMode, extract_scalar_reward
from .convert_ss_to_scenario import generate_scenario
class SMACTuner(Tuner):
"""
This is a wrapper of [SMAC](https://github.com/automl/SMAC3) following the NNI tuner interface.
It only supports ``SMAC`` mode, and does not support multiple instances of SMAC3 (i.e.,
running the same configuration multiple times).
"""
def __init__(self, optimize_mode="maximize"):
"""
Parameters
----------
optimize_mode: str
optimize mode, 'maximize' or 'minimize', by default 'maximize'
optimize_mode : str
Optimize mode, 'maximize' or 'minimize', by default 'maximize'
"""
def __init__(self, optimize_mode="maximize"):
"""Constructor"""
self.logger = logging.getLogger(
self.__module__ + "." + self.__class__.__name__)
self.optimize_mode = OptimizeMode(optimize_mode)
......@@ -61,11 +65,14 @@ class SMACTuner(Tuner):
self.cs = None
def _main_cli(self):
"""Main function of SMAC for CLI interface
"""
Main function of SMAC for CLI interface. Some initializations of the wrapped SMAC are done
in this function.
Returns
-------
instance
optimizer
obj
The SMAC optimizer object
"""
self.logger.info("SMAC call: %s", " ".join(sys.argv))
......@@ -126,20 +133,23 @@ class SMACTuner(Tuner):
def update_search_space(self, search_space):
"""
NOTE: updating search space is not supported.
Convert search_space to the format that ``SMAC3`` can recognize; thus, not all search space types
are supported. In this function, we also do the initialization of ``SMAC3``, i.e., calling ``self._main_cli``.
NOTE: updating search space during experiment running is not supported.
Parameters
----------
search_space: dict
search space
search_space : dict
For the format, refer to the search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
"""
# TODO: this is ugly, we put all the initialization work in this method, because initialization relies
# on search space, also because update_search_space is called at the beginning.
if not self.update_ss_done:
self.categorical_dict = generate_scenario(search_space)
if self.categorical_dict is None:
raise RuntimeError('categorical dict is not correctly returned after parsing search space.')
# TODO: this is ugly, we put all the initialization work in this method, because initialization relies
# on search space, also because update_search_space is called at the beginning.
self.optimizer = self._main_cli()
self.smbo_solver = self.optimizer.solver
self.loguniform_key = {key for key in search_space.keys() if search_space[key]['_type'] == 'loguniform'}
......@@ -148,19 +158,23 @@ class SMACTuner(Tuner):
self.logger.warning('update search space is not supported.')
def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
"""receive_trial_result
"""
Receive a trial's final performance result reported through :func:`nni.report_final_result` by the trial.
Parameters
----------
parameter_id: int
parameter id
parameters:
parameters
value:
value
parameter_id : int
Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
parameters : dict
Hyper-parameters generated by :meth:`generate_parameters`.
value : dict
Result from trial (the return value of :func:`nni.report_final_result`).
Raises
------
RuntimeError
Received parameter id not in total_data
Received parameter id not in ``self.total_data``
"""
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Maximize:
......@@ -176,14 +190,16 @@ class SMACTuner(Tuner):
def param_postprocess(self, challenger_dict):
"""
Postprocessing for a set of parameter includes:
1. Convert the values of type `loguniform` back to their initial range.
2. Convert categorical: categorical values in search space are changed to list of numbers before,
Postprocessing for a set of hyperparameters includes:
1. Convert the values of type ``loguniform`` back to their initial range.
2. Convert ``categorical``: categorical values in the search space were changed to lists of numbers before;
those original values will be changed back in this function.
Parameters
----------
challenger_dict: dict
challenger_dict : dict
challenger dict
Returns
-------
dict
......@@ -203,15 +219,21 @@ class SMACTuner(Tuner):
return converted_dict
def generate_parameters(self, parameter_id, **kwargs):
"""generate one instance of hyperparameters
"""
Generate one instance of hyperparameters (i.e., one configuration).
Get one from SMAC3's ``challengers``.
Parameters
----------
parameter_id: int
parameter id
parameter_id : int
Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
new generated parameters
dict
One newly generated configuration
"""
if self.first_one:
init_challenger = self.smbo_solver.nni_smac_start()
......@@ -224,15 +246,23 @@ class SMACTuner(Tuner):
return self.param_postprocess(challenger.get_dictionary())
def generate_multiple_parameters(self, parameter_id_list, **kwargs):
"""generate mutiple instances of hyperparameters
"""
Generate multiple instances of hyperparameters. If it is the first request,
retrieve the instances from the initial challengers. Otherwise, request
new challengers and retrieve instances from them.
Parameters
----------
parameter_id_list: list
list of parameter id
parameter_id_list : list of int
Unique identifiers for each set of requested hyper-parameters.
These will later be used in :meth:`receive_trial_result`.
**kwargs
Not used
Returns
-------
list
list of new generated parameters
A list of newly generated configurations
"""
if self.first_one:
params = []
......@@ -254,11 +284,12 @@ class SMACTuner(Tuner):
def import_data(self, data):
"""
Import additional data for tuning
Import additional data for tuning.
Parameters
----------
data: list of dict
Each of which has at least two keys, `parameter` and `value`.
data : list of dict
Each of which has at least two keys, ``parameter`` and ``value``.
"""
_completed_num = 0
for trial_info in data:
......
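A hedged example of the data format import_data expects (values illustrative; update_search_space is assumed to have been called already):

tuner = SMACTuner(optimize_mode='maximize')
# each record carries a tried configuration and its final result
tuner.import_data([
    {'parameter': {'learning_rate': 0.01, 'optimizer': 'sgd'}, 'value': 0.91},
    {'parameter': {'learning_rate': 0.001, 'optimizer': 'adam'}, 'value': 0.93},
])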
......@@ -43,8 +43,18 @@ _sequence_id = platform.get_sequence_id()
def get_next_parameter():
"""Returns a set of (hyper-)paremeters generated by Tuner.
Returns None if no more (hyper-)parameters can be generated by Tuner."""
"""
Get the hyper parameters generated by the tuner. For a multiphase experiment, it returns a new group of hyper
parameters at each call of get_next_parameter. For a non-multiphase (multiPhase is not configured or set to False)
experiment, it returns hyper parameters only on the first call for each trial job, and returns None from the second call on.
This API should be called only once in each trial job of an experiment that is not specified as multiphase.
Returns
-------
dict
A dict object containing the hyper parameters generated by the tuner; the keys of the dict are defined in the
search space. Returns None if no more hyper parameters can be generated by the tuner.
"""
global _params
_params = platform.get_next_parameter()
if _params is None:
......@@ -52,6 +62,15 @@ def get_next_parameter():
return _params['parameters']
def get_current_parameter(tag=None):
"""
Get the current hyper parameters generated by the tuner. It returns the same group of hyper parameters as the last
call of get_next_parameter returned.
Parameters
----------
tag : str
Hyper parameter key
"""
global _params
if _params is None:
return None
......@@ -60,19 +79,51 @@ def get_current_parameter(tag=None):
return _params['parameters'][tag]
def get_experiment_id():
"""
Get experiment ID.
Returns
-------
str
Identifier of current experiment
"""
return _experiment_id
def get_trial_id():
"""
Get the trial job ID, which is a string identifier of a trial job, for example 'MoXrp'. In one experiment, each trial
job has a unique string ID.
Returns
-------
str
Identifier of current trial job which is calling this API.
"""
return _trial_id
def get_sequence_id():
"""
Get the trial job sequence number. A sequence number is an integer value assigned to each trial job based on the
order in which they are submitted, incrementing from 0. In one experiment, both the trial job ID and the sequence number
are unique for each trial job; they are of different data types.
Returns
-------
int
Sequence number of current trial job which is calling this API.
"""
return _sequence_id
_intermediate_seq = 0
def report_intermediate_result(metric):
"""Reports intermediate result to Assessor.
metric: serializable object.
"""
Reports intermediate result to NNI.
Parameters
----------
metric
Serializable object.
"""
global _intermediate_seq
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_intermediate_result'
......@@ -88,8 +139,13 @@ def report_intermediate_result(metric):
def report_final_result(metric):
"""Reports final result to tuner.
metric: serializable object.
"""
Reports final result to NNI.
Parameters
----------
metric
Serializable object.
"""
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result'
metric = json_tricks.dumps({
......
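Taken together, the trial-side APIs documented above are typically used in this pattern (a minimal sketch; train_one_epoch is a hypothetical placeholder for the user's training code):

import nni

params = nni.get_next_parameter()         # called once per (non-multiphase) trial
for epoch in range(10):
    accuracy = train_one_epoch(params)    # hypothetical user training step
    nni.report_intermediate_result(accuracy)
nni.report_final_result(accuracy)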
......@@ -76,10 +76,12 @@ class Tuner(Recoverable):
Builtin tuners:
:class:`~nni.hyperopt_tuner.hyperopt_tuner.HyperoptTuner`
:class:`~nni.evolution_tuner.evolution_tuner.EvolutionTuner`
:class:`~nni.smac_tuner.smac_tuner.SMACTuner`
:class:`~nni.gridsearch_tuner.gridsearch_tuner.GridSearchTuner`
:class:`~nni.smac_tuner.SMACTuner`
:class:`~nni.gridsearch_tuner.GridSearchTuner`
:class:`~nni.networkmorphism_tuner.networkmorphism_tuner.NetworkMorphismTuner`
:class:`~nni.metis_tuner.mets_tuner.MetisTuner`
:class:`~nni.ppo_tuner.PPOTuner`
:class:`~nni.gp_tuner.gp_tuner.GPTuner`
"""
def generate_parameters(self, parameter_id, **kwargs):
......
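For orientation, a tuner implements at minimum the methods documented throughout this diff; a minimal hypothetical subclass (illustration only, not a builtin):

import random

class RandomChoiceTuner(Tuner):
    """Toy tuner: randomly samples every 'choice' entry of the search space."""
    def __init__(self):
        self.space = None

    def update_search_space(self, search_space):
        self.space = search_space

    def generate_parameters(self, parameter_id, **kwargs):
        return {name: random.choice(spec['_value'])
                for name, spec in self.space.items()
                if spec['_type'] == 'choice'}

    def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
        pass  # this toy tuner ignores results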
......@@ -2,81 +2,26 @@ from unittest import TestCase, main
import tensorflow as tf
import torch
import torch.nn.functional as F
import nni.compression.tensorflow as tf_compressor
import nni.compression.torch as torch_compressor
def weight_variable(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x_input, w_matrix):
return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')
def max_pool(x_input, pool_size):
size = [1, pool_size, pool_size, 1]
return tf.nn.max_pool(x_input, ksize=size, strides=size, padding='SAME')
class TfMnist:
def __init__(self):
images = tf.placeholder(tf.float32, [None, 784], name='input_x')
labels = tf.placeholder(tf.float32, [None, 10], name='input_y')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
self.images = images
self.labels = labels
self.keep_prob = keep_prob
self.train_step = None
self.accuracy = None
self.w1 = None
self.b1 = None
self.fcw1 = None
self.cross = None
with tf.name_scope('reshape'):
x_image = tf.reshape(images, [-1, 28, 28, 1])
with tf.name_scope('conv1'):
w_conv1 = weight_variable([5, 5, 1, 32])
self.w1 = w_conv1
b_conv1 = bias_variable([32])
self.b1 = b_conv1
h_conv1 = tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1)
with tf.name_scope('pool1'):
h_pool1 = max_pool(h_conv1, 2)
with tf.name_scope('conv2'):
w_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
with tf.name_scope('pool2'):
h_pool2 = max_pool(h_conv2, 2)
with tf.name_scope('fc1'):
w_fc1 = weight_variable([7 * 7 * 64, 1024])
self.fcw1 = w_fc1
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
with tf.name_scope('dropout'):
h_fc1_drop = tf.nn.dropout(h_fc1, 0.5)
with tf.name_scope('fc2'):
w_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
with tf.name_scope('loss'):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=y_conv))
self.cross = cross_entropy
with tf.name_scope('adam_optimizer'):
self.train_step = tf.train.AdamOptimizer(0.0001).minimize(cross_entropy)
with tf.name_scope('accuracy'):
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(labels, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if tf.__version__ >= '2.0':
import nni.compression.tensorflow as tf_compressor
def get_tf_mnist_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(filters=32, kernel_size=7, input_shape=[28, 28, 1], activation='relu', padding="SAME"),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding="SAME"),
tf.keras.layers.MaxPooling2D(pool_size=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=128, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
return model
class TorchMnist(torch.nn.Module):
def __init__(self):
......@@ -96,25 +41,47 @@ class TorchMnist(torch.nn.Module):
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def tf2(func):
def test_tf2_func(self):
if tf.__version__ >= '2.0':
func()
return test_tf2_func
class CompressorTestCase(TestCase):
def test_tf_pruner(self):
model = TfMnist()
configure_list = [{'sparsity': 0.8, 'op_types': ['default']}]
tf_compressor.LevelPruner(tf.get_default_graph(), configure_list).compress()
def test_tf_quantizer(self):
model = TfMnist()
tf_compressor.NaiveQuantizer(tf.get_default_graph(), [{'op_types': ['default']}]).compress()
def test_torch_pruner(self):
model = TorchMnist()
configure_list = [{'sparsity': 0.8, 'op_types': ['default']}]
torch_compressor.LevelPruner(model, configure_list).compress()
def test_torch_fpgm_pruner(self):
model = TorchMnist()
configure_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
torch_compressor.FPGMPruner(model, configure_list).compress()
def test_torch_quantizer(self):
model = TorchMnist()
torch_compressor.NaiveQuantizer(model, [{'op_types': ['default']}]).compress()
configure_list = [{
'quant_types': ['weight'],
'quant_bits': {
'weight': 8,
},
'op_types':['Conv2d', 'Linear']
}]
torch_compressor.NaiveQuantizer(model, configure_list).compress()
@tf2
def test_tf_pruner(self):
configure_list = [{'sparsity': 0.8, 'op_types': ['default']}]
tf_compressor.LevelPruner(get_tf_mnist_model(), configure_list).compress()
@tf2
def test_tf_quantizer(self):
tf_compressor.NaiveQuantizer(get_tf_mnist_model(), [{'op_types': ['default']}]).compress()
@tf2
def test_tf_fpgm_pruner(self):
configure_list = [{'sparsity': 0.5, 'op_types': ['Conv2D']}]
tf_compressor.FPGMPruner(get_tf_mnist_model(), configure_list).compress()
if __name__ == '__main__':
......
jobs:
- job: 'pip_install_ubuntu_python36'
  pool:
    vmImage: 'ubuntu-18.04'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python3 -V
      python3 -m pip install --upgrade pip setuptools --user
      python3 -m pip install --upgrade nni --user
    displayName: 'Install nni'
- job: 'pip_install_macOS_python36'
  pool:
    vmImage: 'macOS-10.13'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python3 -V
      python3 -m pip install --upgrade pip setuptools --user
      python3 -m pip install --upgrade nni --user
    displayName: 'Install nni'
- job: 'pip_install_windows_python36'
  pool:
    vmImage: 'windows-latest'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python -V
      python -m pip install --upgrade pip setuptools --user
      python -m pip install --upgrade nni --user
    displayName: 'Install nni'
......@@ -72,7 +72,7 @@ class Experiments:
self.experiment_file = os.path.join(NNICTL_HOME_DIR, '.experiment')
self.experiments = self.read_file()
def add_experiment(self, expId, port, time, file_name, platform):
def add_experiment(self, expId, port, time, file_name, platform, experiment_name):
'''set {key:value} pairs to self.experiment'''
self.experiments[expId] = {}
self.experiments[expId]['port'] = port
......@@ -81,6 +81,7 @@ class Experiments:
self.experiments[expId]['status'] = 'INITIALIZED'
self.experiments[expId]['fileName'] = file_name
self.experiments[expId]['platform'] = platform
self.experiments[expId]['experimentName'] = experiment_name
self.write_file()
def update_experiment(self, expId, key, value):
......
......@@ -66,7 +66,7 @@ EXPERIMENT_INFORMATION_FORMAT = '-----------------------------------------------
'%s\n' \
'----------------------------------------------------------------------------------------\n'
EXPERIMENT_DETAIL_FORMAT = 'Id: %s Status: %s Port: %s Platform: %s StartTime: %s EndTime: %s \n'
EXPERIMENT_DETAIL_FORMAT = 'Id: %s Name: %s Status: %s Port: %s Platform: %s StartTime: %s EndTime: %s\n'
EXPERIMENT_MONITOR_INFO = 'Id: %s Status: %s Port: %s Platform: %s \n' \
'StartTime: %s Duration: %s'
......
......@@ -478,10 +478,11 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
web_ui_url_list = get_local_urls(args.port)
nni_config.set_config('webuiUrl', web_ui_url_list)
#save experiment information
# save experiment information
nnictl_experiment_config = Experiments()
nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name,\
experiment_config['trainingServicePlatform'])
nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name,
experiment_config['trainingServicePlatform'],
experiment_config['experimentName'])
print_normal(EXPERIMENT_SUCCESS_INFO % (experiment_id, ' '.join(web_ui_url_list)))
......
......@@ -99,9 +99,13 @@ def check_experiment_id(args, update=True):
print_error('There are multiple experiments, please set the experiment id...')
experiment_information = ""
for key in running_experiment_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \
experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'],\
experiment_dict[key]['endTime']))
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiment_dict[key].get('experimentName', 'N/A'),
experiment_dict[key]['status'],
experiment_dict[key]['port'],
experiment_dict[key].get('platform'),
experiment_dict[key]['startTime'],
experiment_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
exit(1)
elif not running_experiment_list:
......@@ -155,9 +159,13 @@ def parse_ids(args):
print_error('There are multiple experiments, please set the experiment id...')
experiment_information = ""
for key in running_experiment_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \
experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], \
experiment_dict[key]['endTime']))
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiment_dict[key].get('experimentName', 'N/A'),
experiment_dict[key]['status'],
experiment_dict[key]['port'],
experiment_dict[key].get('platform'),
experiment_dict[key]['startTime'],
experiment_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
exit(1)
else:
......@@ -573,8 +581,13 @@ def experiment_list(args):
print_warning('There is no experiment running...\nYou can use \'nnictl experiment list --all\' to list all experiments.')
experiment_information = ""
for key in experiment_id_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'],\
experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
experiment_dict[key].get('experimentName', 'N/A'),
experiment_dict[key]['status'],
experiment_dict[key]['port'],
experiment_dict[key].get('platform'),
experiment_dict[key]['startTime'],
experiment_dict[key]['endTime'])
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
def get_time_interval(time1, time2):
......