Commit 64e8b686 authored by Shufan Huang's avatar Shufan Huang Committed by QuanluZhang

Extract common functions in sdk ("extract_scalar_reward") (#967)

create utils.py, move extract_scalar_reward into it, and remove the duplicated copies from the Tuner base class and the Hyperband advisor
parent 9b9c974b
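The change follows one pattern across every tuner in the hunks below: each call to the removed Tuner method self.extract_scalar_reward(value) becomes a call to the shared helper imported from nni.utils. A minimal before/after sketch; the class name and method body are illustrative, not taken from any file in this commit:

from nni.tuner import Tuner
from nni.utils import extract_scalar_reward

class SomeTuner(Tuner):                          # illustrative tuner, not part of this commit
    def receive_trial_result(self, parameter_id, parameters, value):
        # before this commit: reward = self.extract_scalar_reward(value)  (method on Tuner)
        reward = extract_scalar_reward(value)    # after: module-level helper from nni/utils.py
        self.trial_rewards = getattr(self, 'trial_rewards', []) + [reward]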
......@@ -24,6 +24,7 @@ import random
import numpy as np
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
logger = logging.getLogger('ga_customer_tuner')
......@@ -115,7 +116,7 @@ class CustomerTuner(Tuner):
parameters : dict of parameters
value: final metrics of the trial, including reward
'''
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
......
......@@ -25,6 +25,7 @@ import os
from threading import Event, Lock, current_thread
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from graph import Graph, Layer, LayerType, Enum, graph_dumps, graph_loads, unique
......@@ -205,7 +206,7 @@ class CustomerTuner(Tuner):
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current acquired')
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
......
......@@ -31,6 +31,7 @@ import random
import numpy as np
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from .. import parameter_expressions
......@@ -287,7 +288,7 @@ class EvolutionTuner(Tuner):
if value is dict, it should have "default" key.
value is final metrics of the trial.
'''
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
if parameter_id not in self.total_data:
raise RuntimeError('Received parameter_id not in total_data.')
# restore the parameters that contain "_index"
......
......@@ -32,6 +32,7 @@ import json_tricks
from nni.protocol import CommandType, send
from nni.msg_dispatcher_base import MsgDispatcherBase
from nni.common import init_logger
from nni.utils import extract_scalar_reward
from .. import parameter_expressions
_logger = logging.getLogger(__name__)
......@@ -268,22 +269,6 @@ class Bracket():
self.num_configs_to_run.append(len(hyper_configs))
self.increase_i()
def extract_scalar_reward(value, scalar_key='default'):
"""
Raises
------
RuntimeError
Incorrect final result: the final result should be float/int,
or a dict which has a key named "default" whose value is float/int.
"""
if isinstance(value, float) or isinstance(value, int):
reward = value
elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
reward = value[scalar_key]
else:
raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
return reward
class Hyperband(MsgDispatcherBase):
"""Hyperband inherit from MsgDispatcherBase rather than Tuner, because it integrates both tuner's functions and assessor's functions.
This is an implementation that could fully leverage available resources, i.e., high parallelism.
......
......@@ -29,6 +29,7 @@ import numpy as np
import hyperopt as hp
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
logger = logging.getLogger('hyperopt_AutoML')
......@@ -241,7 +242,7 @@ class HyperoptTuner(Tuner):
if value is dict, it should have "default" key.
value is final metrics of the trial.
"""
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
# restore the parameters that contain '_index'
if parameter_id not in self.total_data:
raise RuntimeError('Received parameter_id not in total_data.')
......
......@@ -38,6 +38,7 @@ import nni.metis_tuner.Regression_GP.OutlierDetection as gp_outlier_detection
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
import nni.metis_tuner.Regression_GP.Selection as gp_selection
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
logger = logging.getLogger("Metis_Tuner_AutoML")
......@@ -220,7 +221,7 @@ class MetisTuner(Tuner):
value : dict/float
if value is dict, it should have "default" key.
"""
value = self.extract_scalar_reward(value)
value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Maximize:
value = -value
......
......@@ -23,6 +23,7 @@ import os
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
from nni.networkmorphism_tuner.bayesian import BayesianOptimizer
from nni.networkmorphism_tuner.nn import CnnGenerator, MlpGenerator
from nni.networkmorphism_tuner.utils import Constant, OptimizeMode
......@@ -161,7 +162,7 @@ class NetworkMorphismTuner(Tuner):
value : dict/float
if value is dict, it should have "default" key.
"""
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
if parameter_id not in self.total_data:
raise RuntimeError("Received parameter_id not in total_data.")
......
......@@ -22,6 +22,7 @@ smac_tuner.py
"""
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
import sys
import logging
......@@ -166,7 +167,7 @@ class SMACTuner(Tuner):
RuntimeError
Received parameter id not in total_data
"""
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Maximize:
reward = -reward
......
......@@ -103,12 +103,3 @@ class Tuner(Recoverable):
def _on_error(self):
pass
def extract_scalar_reward(self, value, scalar_key='default'):
if isinstance(value, float) or isinstance(value, int):
reward = value
elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
reward = value[scalar_key]
else:
raise RuntimeError('Incorrect final result: the final result for %s should be float/int, or a dict which has a key named "default" whose value is float/int.' % str(self.__class__))
return reward
\ No newline at end of file
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
def extract_scalar_reward(value, scalar_key='default'):
"""
Raises
------
RuntimeError
Incorrect final result: the final result should be float/int,
or a dict which has a key named "default" whose value is float/int.
"""
if isinstance(value, float) or isinstance(value, int):
reward = value
elif isinstance(value, dict) and scalar_key in value and isinstance(value[scalar_key], (float, int)):
reward = value[scalar_key]
else:
raise RuntimeError('Incorrect final result: the final result should be float/int, or a dict which has a key named "default" whose value is float/int.')
return reward
\ No newline at end of file
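For reference, a short usage sketch of the helper added above; the metric values here are invented for illustration:

from nni.utils import extract_scalar_reward

print(extract_scalar_reward(0.93))                             # float/int is returned as-is -> 0.93
print(extract_scalar_reward({'default': 0.93, 'loss': 0.21}))  # dict: the 'default' entry is returned -> 0.93
# extract_scalar_reward({'accuracy': 0.93})                    # no 'default' key -> raises RuntimeError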
......@@ -23,6 +23,7 @@ import nni.protocol
from nni.protocol import CommandType, send, receive
from nni.tuner import Tuner
from nni.msg_dispatcher import MsgDispatcher
from nni.utils import extract_scalar_reward
from io import BytesIO
import json
from unittest import TestCase, main
......@@ -45,11 +46,11 @@ class NaiveTuner(Tuner):
}
def receive_trial_result(self, parameter_id, parameters, value):
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
self.trial_results.append((parameter_id, parameters['param'], reward, False))
def receive_customized_trial_result(self, parameter_id, parameters, value):
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
self.trial_results.append((parameter_id, parameters['param'], reward, True))
def update_search_space(self, search_space):
......
......@@ -3,6 +3,7 @@ import logging
import os
from nni.tuner import Tuner
from nni.utils import extract_scalar_reward
_logger = logging.getLogger('NaiveTuner')
_logger.info('start')
......@@ -21,7 +22,7 @@ class NaiveTuner(Tuner):
return { 'x': self.cur }
def receive_trial_result(self, parameter_id, parameters, value):
reward = self.extract_scalar_reward(value)
reward = extract_scalar_reward(value)
_logger.info('receive trial result: %s, %s, %s' % (parameter_id, parameters, reward))
_result.write('%d %d\n' % (parameters['x'], reward))
_result.flush()
......