"git@developer.sourcefind.cn:OpenDAS/nni.git" did not exist on "cc5218e55a688e67f0b0e63003958c5f8e36c6b2"
Commit 37f4b126 authored by suiguoxin's avatar suiguoxin
Browse files

fix minimize mode error

parent df3952a2
authorName: default authorName: default
experimentName: example_auto-gbdt experimentName: example_auto-gbdt
trialConcurrency: 1 trialConcurrency: 1
maxExecDuration: 48h maxExecDuration: 10h
maxTrialNum: 1000 maxTrialNum: 10
#choice: local, remote, pai #choice: local, remote, pai
trainingServicePlatform: local trainingServicePlatform: local
searchSpacePath: search_space.json searchSpacePath: search_space.json
...@@ -11,11 +11,10 @@ useAnnotation: false ...@@ -11,11 +11,10 @@ useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
#SMAC (SMAC should be installed through nnictl) #SMAC (SMAC should be installed through nnictl)
builtinTunerName: GPTuner builtinTunerName: TPE
classArgs: classArgs:
#choice: maximize, minimize #choice: maximize, minimize
optimize_mode: minimize optimize_mode: minimize
cold_start_num : 1
trial: trial:
command: python3 main.py command: python3 main.py
codeDir: . codeDir: .
......
...@@ -11,11 +11,10 @@ useAnnotation: false ...@@ -11,11 +11,10 @@ useAnnotation: false
tuner: tuner:
#choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
#SMAC (SMAC should be installed through nnictl) #SMAC (SMAC should be installed through nnictl)
builtinTunerName: GPTuner builtinTunerName: TPE
classArgs: classArgs:
#choice: maximize, minimize #choice: maximize, minimize
optimize_mode: maximize optimize_mode: maximize
cold_start_num : 1
trial: trial:
command: python3 mnist.py command: python3 mnist.py
codeDir: . codeDir: .
......
...@@ -15,7 +15,8 @@ max-attributes=15 ...@@ -15,7 +15,8 @@ max-attributes=15
const-naming-style=any const-naming-style=any
disable=duplicate-code, disable=duplicate-code,
super-init-not-called super-init-not-called,
cell-var-from-loop
# List of members which are set dynamically and missed by pylint inference # List of members which are set dynamically and missed by pylint inference
generated-members=numpy.*,torch.* generated-members=numpy.*,torch.*
...@@ -29,7 +29,7 @@ from sklearn.gaussian_process.kernels import Matern ...@@ -29,7 +29,7 @@ from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process import GaussianProcessRegressor
from nni.tuner import Tuner from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward from nni.utils import extract_scalar_reward
from .target_space import TargetSpace from .target_space import TargetSpace
from .util import UtilityFunction, acq_max from .util import UtilityFunction, acq_max
...@@ -42,12 +42,12 @@ class GPTuner(Tuner): ...@@ -42,12 +42,12 @@ class GPTuner(Tuner):
GPTuner GPTuner
''' '''
def __init__(self, optimize_mode="maximize", utility_kind='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6, cold_start_num=10, def __init__(self, optimize_mode="maximize", utility='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6, cold_start_num=10,
selection_num_warm_up=1e5, selection_num_starting_points=250): selection_num_warm_up=100000, selection_num_starting_points=250):
self.optimize_mode = optimize_mode self.optimize_mode = optimize_mode
# utility function related # utility function related
self.utility_kind = utility_kind self.utility = utility
self.kappa = kappa self.kappa = kappa
self.xi = xi self.xi = xi
...@@ -107,7 +107,7 @@ class GPTuner(Tuner): ...@@ -107,7 +107,7 @@ class GPTuner(Tuner):
self._gp.fit(self._space.params, self._space.target) self._gp.fit(self._space.params, self._space.target)
util = UtilityFunction( util = UtilityFunction(
kind=self.utility_kind, kappa=self.kappa, xi=self.xi) kind=self.utility, kappa=self.kappa, xi=self.xi)
results = acq_max( results = acq_max(
f_acq=util.utility, f_acq=util.utility,
...@@ -134,7 +134,7 @@ class GPTuner(Tuner): ...@@ -134,7 +134,7 @@ class GPTuner(Tuner):
if value is dict, it should have "default" key. if value is dict, it should have "default" key.
""" """
value = extract_scalar_reward(value) value = extract_scalar_reward(value)
if self.optimize_mode == OptimizeMode.Minimize: if self.optimize_mode == 'minimize':
value = -value value = -value
logger.info("Received trial result.") logger.info("Received trial result.")
......
...@@ -38,7 +38,7 @@ def _match_val_type(vals, bounds): ...@@ -38,7 +38,7 @@ def _match_val_type(vals, bounds):
if _type == "choice": if _type == "choice":
# Find the closest integer in the array, vals_bounds # Find the closest integer in the array, vals_bounds
vals_new.append( vals_new.append(
min(bounds[i]['_value'], key=lambda x: abs(x - vals[i]))) min(bound['_value'], key=lambda x: abs(x - vals[i])))
elif _type in ['quniform', 'randint']: elif _type in ['quniform', 'randint']:
vals_new.append(np.around(vals[i])) vals_new.append(np.around(vals[i]))
else: else:
...@@ -82,11 +82,12 @@ def acq_max(f_acq, gp, y_max, bounds, space, num_warmup, num_starting_points): ...@@ -82,11 +82,12 @@ def acq_max(f_acq, gp, y_max, bounds, space, num_warmup, num_starting_points):
# Warm up with random points # Warm up with random points
x_tries = [space.random_sample() x_tries = [space.random_sample()
for _ in range(int(num_warmup))] for _ in range(num_warmup)]
ys = f_acq(x_tries, gp=gp, y_max=y_max) ys = f_acq(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()] x_max = x_tries[ys.argmax()]
max_acq = ys.max() max_acq = ys.max()
# Explore the parameter space more thoroughly # Explore the parameter space more thoroughly
x_seeds = [space.random_sample() for _ in range(num_starting_points)] x_seeds = [space.random_sample() for _ in range(num_starting_points)]
...@@ -156,7 +157,7 @@ class UtilityFunction(): ...@@ -156,7 +157,7 @@ class UtilityFunction():
def _ei(x, gp, y_max, xi): def _ei(x, gp, y_max, xi):
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter("ignore") warnings.simplefilter("ignore")
mean, std = gp.predict(x, return_std=True) mean, std = gp.predict(x, return_std=True) # TODO: sample_y ??
z = (mean - y_max - xi)/std z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z) return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment