Commit 37f4b126 authored by suiguoxin

fix minimize mode error in GPTuner

GPTuner stored optimize_mode as the plain string parsed from classArgs but compared it against the OptimizeMode enum, so the comparison was always False and minimize mode never negated the reward. Compare against the string 'minimize' instead, drop the now-unused OptimizeMode import, rename the utility_kind argument to utility, and switch the example configs to TPE.

parent df3952a2
 authorName: default
 experimentName: example_auto-gbdt
 trialConcurrency: 1
-maxExecDuration: 48h
-maxTrialNum: 1000
+maxExecDuration: 10h
+maxTrialNum: 10
 #choice: local, remote, pai
 trainingServicePlatform: local
 searchSpacePath: search_space.json
@@ -11,11 +11,10 @@ useAnnotation: false
 tuner:
   #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
   #SMAC (SMAC should be installed through nnictl)
-  builtinTunerName: GPTuner
+  builtinTunerName: TPE
   classArgs:
     #choice: maximize, minimize
     optimize_mode: minimize
-    cold_start_num : 1
 trial:
   command: python3 main.py
   codeDir: .
......
@@ -11,11 +11,10 @@ useAnnotation: false
 tuner:
   #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   #SMAC (SMAC should be installed through nnictl)
-  builtinTunerName: GPTuner
+  builtinTunerName: TPE
   classArgs:
     #choice: maximize, minimize
     optimize_mode: maximize
-    cold_start_num : 1
 trial:
   command: python3 mnist.py
   codeDir: .
......
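For anyone who wants to keep using GPTuner rather than switch to TPE, the renamed argument is passed through classArgs. A minimal sketch, assuming the example layout above; utility is the constructor argument renamed from utility_kind in the gp_tuner.py hunk below, and cold_start_num now defaults to 10:

tuner:
  builtinTunerName: GPTuner
  classArgs:
    #choice: maximize, minimize
    optimize_mode: minimize
    utility: ei          # renamed from utility_kind in this commit
    cold_start_num: 10   # random trials collected before fitting the GP
trial:
  command: python3 main.py
  codeDir: .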
@@ -15,7 +15,8 @@ max-attributes=15
 const-naming-style=any
 disable=duplicate-code,
-        super-init-not-called
+        super-init-not-called,
+        cell-var-from-loop
 # List of members which are set dynamically and missed by pylint inference
 generated-members=numpy.*,torch.*
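The newly ignored cell-var-from-loop check flags closures that capture a loop variable by reference; the lambda in the _match_val_type hunk below is the code that trips it (harmless there, since min() calls the lambda before the loop advances, but pylint cannot prove that). A short illustrative sketch of the pitfall the check normally guards against:

# Each lambda closes over the variable i itself, not its value at creation
# time, so every call sees the final value once the loop has finished.
fns = [lambda: i for i in range(3)]
print([f() for f in fns])      # [2, 2, 2], not [0, 1, 2]

# Binding the value as a default argument freezes it per iteration.
fns = [lambda i=i: i for i in range(3)]
print([f() for f in fns])      # [0, 1, 2]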
@@ -29,7 +29,7 @@
 from sklearn.gaussian_process.kernels import Matern
 from sklearn.gaussian_process import GaussianProcessRegressor
 from nni.tuner import Tuner
-from nni.utils import OptimizeMode, extract_scalar_reward
+from nni.utils import extract_scalar_reward
 from .target_space import TargetSpace
 from .util import UtilityFunction, acq_max
@@ -42,12 +42,12 @@ class GPTuner(Tuner):
     GPTuner
     '''
-    def __init__(self, optimize_mode="maximize", utility_kind='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6, cold_start_num=10,
-                 selection_num_warm_up=1e5, selection_num_starting_points=250):
+    def __init__(self, optimize_mode="maximize", utility='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6, cold_start_num=10,
+                 selection_num_warm_up=100000, selection_num_starting_points=250):
         self.optimize_mode = optimize_mode

         # utility function related
-        self.utility_kind = utility_kind
+        self.utility = utility
         self.kappa = kappa
         self.xi = xi
@@ -107,7 +107,7 @@ class GPTuner(Tuner):
         self._gp.fit(self._space.params, self._space.target)

         util = UtilityFunction(
-            kind=self.utility_kind, kappa=self.kappa, xi=self.xi)
+            kind=self.utility, kappa=self.kappa, xi=self.xi)
         results = acq_max(
             f_acq=util.utility,
@@ -134,7 +134,7 @@ class GPTuner(Tuner):
         if value is dict, it should have "default" key.
         """
         value = extract_scalar_reward(value)
-        if self.optimize_mode == OptimizeMode.Minimize:
+        if self.optimize_mode == 'minimize':
             value = -value

         logger.info("Received trial result.")
......
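This is the heart of the fix: classArgs delivers optimize_mode as a plain string, and a string never compares equal to an Enum member, so value was never negated and minimize mode silently behaved like maximize. A minimal reproduction, assuming nni.utils.OptimizeMode is a standard Enum with string values:

from enum import Enum

class OptimizeMode(Enum):      # assumed shape of nni.utils.OptimizeMode
    Minimize = 'minimize'
    Maximize = 'maximize'

optimize_mode = 'minimize'     # as parsed from classArgs in config.yml

# Before the fix: always False, so the reward was never negated.
print(optimize_mode == OptimizeMode.Minimize)   # False

# After the fix: plain string comparison behaves as intended.
print(optimize_mode == 'minimize')              # True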
@@ -38,7 +38,7 @@ def _match_val_type(vals, bounds):
         if _type == "choice":
             # Find the closest integer in the array, vals_bounds
             vals_new.append(
-                min(bounds[i]['_value'], key=lambda x: abs(x - vals[i])))
+                min(bound['_value'], key=lambda x: abs(x - vals[i])))
         elif _type in ['quniform', 'randint']:
             vals_new.append(np.around(vals[i]))
         else:
@@ -82,11 +82,12 @@ def acq_max(f_acq, gp, y_max, bounds, space, num_warmup, num_starting_points):
     # Warm up with random points
     x_tries = [space.random_sample()
-               for _ in range(int(num_warmup))]
+               for _ in range(num_warmup)]
     ys = f_acq(x_tries, gp=gp, y_max=y_max)
     x_max = x_tries[ys.argmax()]
     max_acq = ys.max()
+
     # Explore the parameter space more thoroughly
     x_seeds = [space.random_sample() for _ in range(num_starting_points)]
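acq_max searches for the maximizer of the acquisition function in two stages: a cheap random warm-up that keeps the best of num_warmup samples as the incumbent, then a finer search seeded from the num_starting_points random x_seeds. A standalone sketch of the warm-up stage with a toy acquisition function; acq_max_warmup, random_sample, and f_acq here are illustrative stand-ins, not the NNI API:

import numpy as np

def acq_max_warmup(f_acq, random_sample, num_warmup=100000):
    # Evaluate the acquisition on num_warmup random points and keep
    # the best one as the starting incumbent for the finer search.
    x_tries = np.array([random_sample() for _ in range(num_warmup)])
    ys = f_acq(x_tries)
    return x_tries[ys.argmax()], ys.max()

# Toy 1-D example: the acquisition peaks at x = 0.3.
x_max, max_acq = acq_max_warmup(
    f_acq=lambda x: -np.square(x[:, 0] - 0.3),
    random_sample=lambda: np.random.uniform(0.0, 1.0, size=1),
    num_warmup=10000)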
@@ -156,7 +157,7 @@ class UtilityFunction():
     def _ei(x, gp, y_max, xi):
         with warnings.catch_warnings():
             warnings.simplefilter("ignore")
-            mean, std = gp.predict(x, return_std=True)
+            mean, std = gp.predict(x, return_std=True)  # TODO: sample_y ??
         z = (mean - y_max - xi)/std
         return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
......
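_ei above is the textbook Expected Improvement acquisition: with GP posterior mean μ(x) and standard deviation σ(x), z = (μ(x) − y_max − ξ)/σ(x) and EI(x) = (μ(x) − y_max − ξ)Φ(z) + σ(x)φ(z), trading off a high predicted mean against high uncertainty. A self-contained sketch mirroring the hunk; the observations are made up for illustration:

import numpy as np
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

def expected_improvement(x, gp, y_max, xi=0.0):
    # Same formula as UtilityFunction._ei above.
    mean, std = gp.predict(x, return_std=True)
    z = (mean - y_max - xi) / std
    return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)

# Toy observations (illustrative only); kernel matches GPTuner's defaults
# (Matern with nu=2.5, alpha=1e-6).
X = np.array([[0.1], [0.4], [0.9]])
y = np.array([0.2, 0.8, 0.3])
gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), alpha=1e-6).fit(X, y)

x_grid = np.linspace(0.0, 1.0, 5).reshape(-1, 1)
print(expected_improvement(x_grid, gp, y_max=y.max()))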