OpenDAS / nni / Commits

Commit e9040c9b (unverified), authored Jul 03, 2019 by chicm-ms, committed by GitHub on Jul 03, 2019

Merge pull request #23 from microsoft/master

pull code

Parents: 256f27af, ed63175c
Changes: 108 files in this commit; this page shows 20 changed files with 782 additions and 40 deletions (+782 −40).
examples/trials/mnist/config_assessor.yml                           +1   −1
examples/trials/mnist/config_frameworkcontroller.yml                +1   −1
examples/trials/mnist/config_kubeflow.yml                           +1   −1
examples/trials/mnist/config_pai.yml                                +1   −1
examples/trials/mnist/config_windows.yml                            +1   −1
examples/trials/network_morphism/README_zh_CN.md                    +1   −1
examples/tuners/ga_customer_tuner/README_zh_CN.md                   +1   −1
examples/tuners/weight_sharing/ga_customer_tuner/README_zh_CN.md    +1   −1
src/nni_manager/rest_server/restValidationSchemas.ts                +2   −1
src/sdk/pynni/nni/__init__.py                                       +1   −0
src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py                      +1   −1
src/sdk/pynni/nni/constants.py                                      +3   −1
src/sdk/pynni/nni/gp_tuner/__init__.py                              +0   −0
src/sdk/pynni/nni/gp_tuner/gp_tuner.py                              +170 −0
src/sdk/pynni/nni/gp_tuner/target_space.py                          +219 −0
src/sdk/pynni/nni/gp_tuner/util.py                                  +172 −0
src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py            +1   −1
src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py           +5   −4
src/sdk/pynni/nni/metis_tuner/metis_tuner.py                        +40  −24
src/sdk/pynni/nni/nas_utils.py                                      +160 −0
examples/trials/mnist/config_assessor.yml

@@ -9,7 +9,7 @@ searchSpacePath: search_space.json
 #choice: true, false
 useAnnotation: false
 tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   #SMAC (SMAC should be installed through nnictl)
   builtinTunerName: TPE
   classArgs:
examples/trials/mnist/config_frameworkcontroller.yml

@@ -9,7 +9,7 @@ searchSpacePath: search_space.json
 #choice: true, false
 useAnnotation: false
 tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   builtinTunerName: TPE
   classArgs:
     #choice: maximize, minimize
examples/trials/mnist/config_kubeflow.yml

@@ -9,7 +9,7 @@ searchSpacePath: search_space.json
 #choice: true, false
 useAnnotation: false
 tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   builtinTunerName: TPE
   classArgs:
     #choice: maximize, minimize
examples/trials/mnist/config_pai.yml

@@ -9,7 +9,7 @@ searchSpacePath: search_space.json
 #choice: true, false
 useAnnotation: false
 tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   #SMAC (SMAC should be installed through nnictl)
   builtinTunerName: TPE
   classArgs:
examples/trials/mnist/config_windows.yml

@@ -9,7 +9,7 @@ searchSpacePath: search_space.json
 #choice: true, false
 useAnnotation: false
 tuner:
-  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner
+  #choice: TPE, Random, Anneal, Evolution, BatchTuner, MetisTuner, GPTuner
   #SMAC (SMAC should be installed through nnictl)
   builtinTunerName: TPE
   classArgs:
examples/trials/network_morphism/README_zh_CN.md
examples/tuners/ga_customer_tuner/README_zh_CN.md

 # How to use ga_customer_tuner?
-This customized tuner only works for the code under "~/nni/examples/trials/ga_squad";
-run
+This customized tuner only works for the code under "~/nni/examples/trials/ga_squad"; run
 `cd ~/nni/examples/trials/ga_squad`
 and see readme.md to learn more about ga_squad.

 # Configuration
examples/tuners/weight_sharing/ga_customer_tuner/README_zh_CN.md

 # How to use ga_customer_tuner?
-This customized tuner only works for the code under "~/nni/examples/trials/ga_squad";
-run
+This customized tuner only works for the code under "~/nni/examples/trials/ga_squad"; run
 `cd ~/nni/examples/trials/ga_squad`
 and see readme.md to learn more about ga_squad.

 # Configuration
src/nni_manager/rest_server/restValidationSchemas.ts

@@ -51,6 +51,7 @@ export namespace ValidationSchemas {
         command: joi.string().min(1),
         virtualCluster: joi.string(),
         shmMB: joi.number(),
+        nasMode: joi.string().valid('classic_mode', 'enas_mode', 'oneshot_mode'),
         worker: joi.object({
             replicas: joi.number().min(1).required(),
             image: joi.string().min(1),

@@ -161,7 +162,7 @@ export namespace ValidationSchemas {
             checkpointDir: joi.string().allow('')
         }),
         tuner: joi.object({
-            builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism', 'MetisTuner'),
+            builtinTunerName: joi.string().valid('TPE', 'Random', 'Anneal', 'Evolution', 'SMAC', 'BatchTuner', 'GridSearch', 'NetworkMorphism', 'MetisTuner', 'GPTuner'),
             codeDir: joi.string(),
             classFileName: joi.string(),
             className: joi.string(),
src/sdk/pynni/nni/__init__.py

@@ -23,6 +23,7 @@
 from .trial import *
 from .smartparam import *
+from .nas_utils import reload_tensorflow_variables

 class NoMoreTrialError(Exception):
     def __init__(self, ErrorInfo):
src/sdk/pynni/nni/bohb_advisor/bohb_advisor.py

@@ -106,7 +106,7 @@ class Bracket():
         self.s_max = s_max
         self.eta = eta
         self.max_budget = max_budget
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)

         self.n = math.ceil((s_max + 1) * eta**s / (s + 1) - _epsilon)
         self.r = max_budget / eta**s
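The one-line change above (repeated in hyperband_advisor.py and metis_tuner.py below) converts the raw config string into the OptimizeMode enum at construction time, so an invalid value fails fast and later comparisons are type-safe. A minimal sketch of the pattern; the enum's member values are an assumption inferred from how this diff uses it:

from enum import Enum

class OptimizeMode(Enum):
    # Member values assumed from their usage in this diff.
    Minimize = 'minimize'
    Maximize = 'maximize'

mode = OptimizeMode('maximize')          # accepts the YAML config string
assert mode == OptimizeMode.Maximize     # enum comparison, not string comparison
# OptimizeMode('max') would raise ValueError instead of silently passing through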
src/sdk/pynni/nni/constants.py

@@ -29,7 +29,8 @@ ModuleName = {
     'GridSearch': 'nni.gridsearch_tuner.gridsearch_tuner',
     'NetworkMorphism': 'nni.networkmorphism_tuner.networkmorphism_tuner',
     'Curvefitting': 'nni.curvefitting_assessor.curvefitting_assessor',
-    'MetisTuner': 'nni.metis_tuner.metis_tuner'
+    'MetisTuner': 'nni.metis_tuner.metis_tuner',
+    'GPTuner': 'nni.gp_tuner.gp_tuner'
 }

 ClassName = {

@@ -42,6 +43,7 @@ ClassName = {
     'GridSearch': 'GridSearchTuner',
     'NetworkMorphism': 'NetworkMorphismTuner',
     'MetisTuner': 'MetisTuner',
+    'GPTuner': 'GPTuner',

     'Medianstop': 'MedianstopAssessor',
     'Curvefitting': 'CurvefittingAssessor'
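These two registries map a builtinTunerName from the experiment config to an importable module path and class name. A minimal sketch of how such a registry can be consumed; the helper create_builtin_tuner is hypothetical, written only to illustrate the lookup:

import importlib

from nni.constants import ModuleName, ClassName

def create_builtin_tuner(name, **class_args):
    # Hypothetical helper: resolve the registered module path and class name,
    # import the module, and instantiate the tuner class.
    module = importlib.import_module(ModuleName[name])
    tuner_class = getattr(module, ClassName[name])
    return tuner_class(**class_args)

tuner = create_builtin_tuner('GPTuner', optimize_mode='maximize')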
src/sdk/pynni/nni/gp_tuner/__init__.py (new file, empty)
src/sdk/pynni/nni/gp_tuner/gp_tuner.py (new file)

# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

'''
gp_tuner.py
'''

import warnings
import logging
import numpy as np

from sklearn.gaussian_process.kernels import Matern
from sklearn.gaussian_process import GaussianProcessRegressor

from nni.tuner import Tuner
from nni.utils import OptimizeMode, extract_scalar_reward

from .target_space import TargetSpace
from .util import UtilityFunction, acq_max

logger = logging.getLogger("GP_Tuner_AutoML")


class GPTuner(Tuner):
    '''
    GPTuner
    '''

    def __init__(self, optimize_mode="maximize", utility='ei', kappa=5, xi=0, nu=2.5, alpha=1e-6,
                 cold_start_num=10, selection_num_warm_up=100000, selection_num_starting_points=250):
        self.optimize_mode = OptimizeMode(optimize_mode)

        # utility function related
        self.utility = utility
        self.kappa = kappa
        self.xi = xi

        # target space
        self._space = None

        self._random_state = np.random.RandomState()

        # nu, alpha are GPR related params
        self._gp = GaussianProcessRegressor(
            kernel=Matern(nu=nu),
            alpha=alpha,
            normalize_y=True,
            n_restarts_optimizer=25,
            random_state=self._random_state
        )
        # num of random evaluations before GPR
        self._cold_start_num = cold_start_num

        # params for acq_max
        self._selection_num_warm_up = selection_num_warm_up
        self._selection_num_starting_points = selection_num_starting_points

        # num of imported data
        self.supplement_data_num = 0

    def update_search_space(self, search_space):
        """Update the self.bounds and self.types by the search_space.json

        Parameters
        ----------
        search_space : dict
        """
        self._space = TargetSpace(search_space, self._random_state)

    def generate_parameters(self, parameter_id):
        """Generate next parameter for trial.
        If the number of trial results is lower than the cold start number,
        gp will first randomly generate some parameters.
        Otherwise, choose the parameters by the Gaussian Process Model.

        Parameters
        ----------
        parameter_id : int

        Returns
        -------
        result : dict
        """
        if self._space.len() < self._cold_start_num:
            results = self._space.random_sample()
        else:
            # Sklearn's GP throws a large number of warnings at times, but
            # we don't really need to see them here.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self._gp.fit(self._space.params, self._space.target)

            util = UtilityFunction(kind=self.utility, kappa=self.kappa, xi=self.xi)

            results = acq_max(
                f_acq=util.utility,
                gp=self._gp,
                y_max=self._space.target.max(),
                bounds=self._space.bounds,
                space=self._space,
                num_warmup=self._selection_num_warm_up,
                num_starting_points=self._selection_num_starting_points
            )

        results = self._space.array_to_params(results)
        logger.info("Generate parameters:\n%s", results)
        return results

    def receive_trial_result(self, parameter_id, parameters, value):
        """Tuner receives result from trial.

        Parameters
        ----------
        parameter_id : int
        parameters : dict
        value : dict/float
            if value is dict, it should have "default" key.
        """
        value = extract_scalar_reward(value)
        if self.optimize_mode == OptimizeMode.Minimize:
            value = -value

        logger.info("Received trial result.")
        logger.info("value: %s", value)
        logger.info("parameter: %s", parameters)
        self._space.register(parameters, value)

    def import_data(self, data):
        """Import additional data for tuning.

        Parameters
        ----------
        data:
            a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
        """
        _completed_num = 0
        for trial_info in data:
            logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
            _completed_num += 1
            assert "parameter" in trial_info
            _params = trial_info["parameter"]
            assert "value" in trial_info
            _value = trial_info['value']
            if not _value:
                logger.info("Useless trial data, value is %s, skip this trial data.", _value)
                continue
            self.supplement_data_num += 1
            _parameter_id = '_'.join(["ImportData", str(self.supplement_data_num)])
            self.receive_trial_result(parameter_id=_parameter_id, parameters=_params, value=_value)
        logger.info("Successfully import data to GP tuner.")
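For orientation, a standalone sketch of the tuner's lifecycle (update_search_space, then alternating generate_parameters / receive_trial_result); the toy search space and objective below are assumptions for illustration, not part of this commit:

from nni.gp_tuner.gp_tuner import GPTuner

search_space = {
    "x": {"_type": "uniform", "_value": [-5, 5]},
    "y": {"_type": "quniform", "_value": [0, 10, 1]},
}

tuner = GPTuner(optimize_mode="maximize", cold_start_num=3)
tuner.update_search_space(search_space)

for trial_id in range(10):
    params = tuner.generate_parameters(trial_id)
    # Toy objective standing in for the metric a real trial would report.
    reward = -(params["x"] ** 2) + params["y"]
    tuner.receive_trial_result(trial_id, params, reward)

The first cold_start_num trials are random samples; afterwards the GP is refit on all observations and each new point maximizes the acquisition function.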
src/sdk/pynni/nni/gp_tuner/target_space.py (new file)

# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

'''
target_space.py
'''

import numpy as np
import nni.parameter_expressions as parameter_expressions


def _hashable(params):
    """ ensure that a point is hashable by a python dict """
    return tuple(map(float, params))


class TargetSpace():
    """
    Holds the param-space coordinates (X) and target values (Y)
    """

    def __init__(self, pbounds, random_state=None):
        """
        Parameters
        ----------
        pbounds : dict
            Dictionary with parameters names as keys and a tuple with minimum
            and maximum values.

        random_state : int, RandomState, or None
            optionally specify a seed for a random number generator
        """
        self.random_state = random_state

        # Get the name of the parameters
        self._keys = sorted(pbounds)
        # Create an array with parameters bounds
        self._bounds = np.array(
            [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])]
        )

        # preallocated memory for X and Y points
        self._params = np.empty(shape=(0, self.dim))
        self._target = np.empty(shape=(0))

        # keep track of unique points we have seen so far
        self._cache = {}

    def __contains__(self, params):
        '''
        check if a parameter is already registered
        '''
        return _hashable(params) in self._cache

    def len(self):
        '''
        length of registered params and targets
        '''
        assert len(self._params) == len(self._target)
        return len(self._target)

    @property
    def params(self):
        '''
        params: numpy array
        '''
        return self._params

    @property
    def target(self):
        '''
        target: numpy array
        '''
        return self._target

    @property
    def dim(self):
        '''
        dim: int
            length of keys
        '''
        return len(self._keys)

    @property
    def keys(self):
        '''
        keys: numpy array
        '''
        return self._keys

    @property
    def bounds(self):
        '''bounds'''
        return self._bounds

    def params_to_array(self, params):
        ''' dict to array '''
        try:
            assert set(params) == set(self.keys)
        except AssertionError:
            raise ValueError(
                "Parameters' keys ({}) do ".format(sorted(params)) +
                "not match the expected set of keys ({}).".format(self.keys)
            )
        return np.asarray([params[key] for key in self.keys])

    def array_to_params(self, x):
        '''
        array to dict

        maintain int type if the parameter is defined as int in search_space.json
        '''
        try:
            assert len(x) == len(self.keys)
        except AssertionError:
            raise ValueError(
                "Size of array ({}) is different than the ".format(len(x)) +
                "expected number of parameters ({}).".format(self.dim)
            )

        params = {}
        for i, _bound in enumerate(self._bounds):
            if _bound['_type'] == 'choice' and all(isinstance(val, int) for val in _bound['_value']):
                params.update({self.keys[i]: int(x[i])})
            elif _bound['_type'] in ['randint']:
                params.update({self.keys[i]: int(x[i])})
            else:
                params.update({self.keys[i]: x[i]})

        return params

    def register(self, params, target):
        """
        Append a point and its target value to the known data.

        Parameters
        ----------
        x : dict

        y : float
            target function value
        """
        x = self.params_to_array(params)
        if x in self:
            #raise KeyError('Data point {} is not unique'.format(x))
            print('Data point {} is not unique'.format(x))

        # Insert data into unique dictionary
        self._cache[_hashable(x.ravel())] = target

        self._params = np.concatenate([self._params, x.reshape(1, -1)])
        self._target = np.concatenate([self._target, [target]])

    def random_sample(self):
        """
        Creates a random point within the bounds of the space.
        """
        params = np.empty(self.dim)
        for col, _bound in enumerate(self._bounds):
            if _bound['_type'] == 'choice':
                params[col] = parameter_expressions.choice(_bound['_value'], self.random_state)
            elif _bound['_type'] == 'randint':
                params[col] = self.random_state.randint(
                    _bound['_value'][0], _bound['_value'][1], size=1)
            elif _bound['_type'] == 'uniform':
                params[col] = parameter_expressions.uniform(
                    _bound['_value'][0], _bound['_value'][1], self.random_state)
            elif _bound['_type'] == 'quniform':
                params[col] = parameter_expressions.quniform(
                    _bound['_value'][0], _bound['_value'][1], _bound['_value'][2], self.random_state)
            elif _bound['_type'] == 'loguniform':
                params[col] = parameter_expressions.loguniform(
                    _bound['_value'][0], _bound['_value'][1], self.random_state)
            elif _bound['_type'] == 'qloguniform':
                params[col] = parameter_expressions.qloguniform(
                    _bound['_value'][0], _bound['_value'][1], _bound['_value'][2], self.random_state)

        return params

    def max(self):
        """Get maximum target value found and corresponding parameters."""
        try:
            res = {
                'target': self.target.max(),
                'params': dict(
                    zip(self.keys, self.params[self.target.argmax()])
                )
            }
        except ValueError:
            res = {}
        return res

    def res(self):
        """Get all target values found and corresponding parameters."""
        params = [dict(zip(self.keys, p)) for p in self.params]

        return [
            {"target": target, "params": param}
            for target, param in zip(self.target, params)
        ]
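A short illustration of the bookkeeping above, with a toy NNI-style search space (the values are assumptions, not from this commit):

import numpy as np
from nni.gp_tuner.target_space import TargetSpace

space = TargetSpace(
    {"lr": {"_type": "loguniform", "_value": [1e-4, 1e-1]},
     "units": {"_type": "choice", "_value": [32, 64, 128]}},
    random_state=np.random.RandomState(0))

sample = space.random_sample()           # flat array in sorted-key order: [lr, units]
params = space.array_to_params(sample)   # back to a dict; 'units' is restored to int
space.register(params, 0.9)              # record the observed metric
print(space.len(), space.max())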
src/sdk/pynni/nni/gp_tuner/util.py (new file)

# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

'''
util.py
'''

import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize


def _match_val_type(vals, bounds):
    '''
    Update values in the array, to match their corresponding type
    '''
    vals_new = []

    for i, bound in enumerate(bounds):
        _type = bound['_type']
        if _type == "choice":
            # Find the closest integer in the array, vals_bounds
            vals_new.append(min(bound['_value'], key=lambda x: abs(x - vals[i])))
        elif _type in ['quniform', 'randint']:
            vals_new.append(np.around(vals[i]))
        else:
            vals_new.append(vals[i])

    return vals_new


def acq_max(f_acq, gp, y_max, bounds, space, num_warmup, num_starting_points):
    """
    A function to find the maximum of the acquisition function

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method. First by sampling `num_warmup` (1e5) points at random,
    and then running L-BFGS-B from `num_starting_points` (250) random starting points.

    Parameters
    ----------
    :param f_acq:
        The acquisition function object that returns its point-wise value.

    :param gp:
        A gaussian process fitted to the relevant data.

    :param y_max:
        The current maximum known value of the target function.

    :param bounds:
        The variables bounds to limit the search of the acq max.

    :param num_warmup:
        number of times to randomly sample the acquisition function

    :param num_starting_points:
        number of times to run scipy.minimize

    Returns
    -------
    :return: x_max, The arg max of the acquisition function.
    """

    # Warm up with random points
    x_tries = [space.random_sample() for _ in range(int(num_warmup))]
    ys = f_acq(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more thoroughly
    x_seeds = [space.random_sample() for _ in range(int(num_starting_points))]

    bounds_minmax = np.array(
        [[bound['_value'][0], bound['_value'][-1]] for bound in bounds])

    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -f_acq(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds_minmax,
                       method="L-BFGS-B")

        # See if success
        if not res.success:
            continue

        # Store it if better than previous minimum(maximum).
        if max_acq is None or -res.fun[0] >= max_acq:
            x_max = _match_val_type(res.x, bounds)
            max_acq = -res.fun[0]

    # Clip output to make sure it lies within the bounds. Due to floating
    # point technicalities this is not always the case.
    return np.clip(x_max, bounds_minmax[:, 0], bounds_minmax[:, 1])


class UtilityFunction():
    """
    An object to compute the acquisition functions.
    """

    def __init__(self, kind, kappa, xi):
        """
        If UCB is to be used, a constant kappa is needed.
        """
        self.kappa = kappa
        self.xi = xi

        if kind not in ['ucb', 'ei', 'poi']:
            err = "The utility function " \
                  "{} has not been implemented, " \
                  "please choose one of ucb, ei, or poi.".format(kind)
            raise NotImplementedError(err)
        self.kind = kind

    def utility(self, x, gp, y_max):
        '''return utility function'''
        if self.kind == 'ucb':
            return self._ucb(x, gp, self.kappa)
        if self.kind == 'ei':
            return self._ei(x, gp, y_max, self.xi)
        if self.kind == 'poi':
            return self._poi(x, gp, y_max, self.xi)
        return None

    @staticmethod
    def _ucb(x, gp, kappa):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        return mean + kappa * std

    @staticmethod
    def _ei(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)

    @staticmethod
    def _poi(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        return norm.cdf(z)
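The three acquisition functions above are the standard closed forms: UCB is mu + kappa * sigma, EI is (mu - y_max - xi) * Phi(z) + sigma * phi(z) with z = (mu - y_max - xi) / sigma, and POI is Phi(z). A self-contained illustration of the EI computation against a toy fitted GP (the data points are made up):

import numpy as np
from scipy.stats import norm
from sklearn.gaussian_process import GaussianProcessRegressor

X = np.array([[0.0], [0.5], [1.0]])   # toy observed points
y = np.array([0.1, 0.4, 0.2])         # toy observed targets
gp = GaussianProcessRegressor().fit(X, y)

mean, std = gp.predict(np.array([[0.6]]), return_std=True)
z = (mean - y.max()) / std            # xi = 0, GPTuner's default
ei = (mean - y.max()) * norm.cdf(z) + std * norm.pdf(z)
print(ei)                             # expected improvement at x = 0.6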
src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py

@@ -144,7 +144,7 @@ class Bracket():
         self.configs_perf = []  # [ {id: [seq, acc]}, {}, ... ]
         self.num_configs_to_run = []  # [ n, n, n, ... ]
         self.num_finished_configs = []  # [ n, n, n, ... ]
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)
         self.no_more_trial = False

     def is_completed(self):
src/sdk/pynni/nni/metis_tuner/Regression_GMM/Selection.py

@@ -49,15 +49,16 @@ def selection_r(x_bounds,
                 num_starting_points=100,
                 minimize_constraints_fun=None):
     '''
-    Call selection
+    Select using different types.
     '''
-    minimize_starting_points = [lib_data.rand(x_bounds, x_types) \
-                                for i in range(0, num_starting_points)]
+    minimize_starting_points = clusteringmodel_gmm_good.sample(n_samples=num_starting_points)

     outputs = selection(x_bounds, x_types,
                         clusteringmodel_gmm_good,
                         clusteringmodel_gmm_bad,
-                        minimize_starting_points,
+                        minimize_starting_points[0],
                         minimize_constraints_fun)

     return outputs

 def selection(x_bounds,
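The [0] index on the new sample(...) call matters because sklearn's GaussianMixture.sample returns a (samples, component_labels) tuple; only the first element holds the candidate starting points. A toy illustration (not part of this commit):

import numpy as np
from sklearn.mixture import GaussianMixture

gmm = GaussianMixture(n_components=2).fit(np.random.rand(50, 3))
samples, labels = gmm.sample(n_samples=5)
print(samples.shape)  # (5, 3): five candidate starting points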
src/sdk/pynni/nni/metis_tuner/metis_tuner.py

@@ -20,15 +20,15 @@
 import copy
 import logging
-import numpy as np
 import os
 import random
 import statistics
 import sys
+import warnings
 from enum import Enum, unique
 from multiprocessing.dummy import Pool as ThreadPool

+import numpy as np
+
 import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
 import nni.metis_tuner.lib_data as lib_data
 import nni.metis_tuner.Regression_GMM.CreateModel as gmm_create_model

@@ -42,8 +42,6 @@ from nni.utils import OptimizeMode, extract_scalar_reward
 logger = logging.getLogger("Metis_Tuner_AutoML")

 NONE_TYPE = ''
-CONSTRAINT_LOWERBOUND = None
-CONSTRAINT_UPPERBOUND = None

@@ -93,7 +91,7 @@ class MetisTuner(Tuner):
         self.space = None
         self.no_resampling = no_resampling
         self.no_candidates = no_candidates
-        self.optimize_mode = optimize_mode
+        self.optimize_mode = OptimizeMode(optimize_mode)
         self.key_order = []
         self.cold_start_num = cold_start_num
         self.selection_num_starting_points = selection_num_starting_points

@@ -254,6 +252,9 @@ class MetisTuner(Tuner):
                            threshold_samplessize_resampling=50, no_candidates=False,
                            minimize_starting_points=None, minimize_constraints_fun=None):

+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+
         next_candidate = None
         candidates = []
         samples_size_all = sum([len(i) for i in samples_y])

@@ -271,13 +272,12 @@ class MetisTuner(Tuner):
                                            minimize_constraints_fun=minimize_constraints_fun)
         if not lm_current:
             return None
+        logger.info({'hyperparameter': lm_current['hyperparameter'],
+                     'expected_mu': lm_current['expected_mu'],
+                     'expected_sigma': lm_current['expected_sigma'],
+                     'reason': "exploitation_gp"})

         if no_candidates is False:
-            candidates.append({'hyperparameter': lm_current['hyperparameter'],
-                               'expected_mu': lm_current['expected_mu'],
-                               'expected_sigma': lm_current['expected_sigma'],
-                               'reason': "exploitation_gp"})
-
             # ===== STEP 2: Get recommended configurations for exploration =====
             results_exploration = gp_selection.selection(
                 "lc",

@@ -290,34 +290,48 @@ class MetisTuner(Tuner):
             if results_exploration is not None:
                 if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
-                    candidates.append({'hyperparameter': results_exploration['hyperparameter'],
-                                       'expected_mu': results_exploration['expected_mu'],
-                                       'expected_sigma': results_exploration['expected_sigma'],
-                                       'reason': "exploration"})
+                    temp_candidate = {'hyperparameter': results_exploration['hyperparameter'],
+                                      'expected_mu': results_exploration['expected_mu'],
+                                      'expected_sigma': results_exploration['expected_sigma'],
+                                      'reason': "exploration"}
+                    candidates.append(temp_candidate)
+                    logger.info("DEBUG: 1 exploration candidate selected\n")
+                    logger.info(temp_candidate)
+            else:
+                logger.info("DEBUG: No suitable exploration candidates were")

             # ===== STEP 3: Get recommended configurations for exploitation =====
             if samples_size_all >= threshold_samplessize_exploitation:
-                print("Getting candidates for exploitation...\n")
+                logger.info("Getting candidates for exploitation...\n")
                 try:
                     gmm = gmm_create_model.create_model(samples_x, samples_y_aggregation)
-                    results_exploitation = gmm_selection.selection(
-                        x_bounds,
-                        x_types,
-                        gmm['clusteringmodel_good'],
-                        gmm['clusteringmodel_bad'],
-                        minimize_starting_points,
-                        minimize_constraints_fun=minimize_constraints_fun)
+
+                    if ("discrete_int" in x_types) or ("range_int" in x_types):
+                        results_exploitation = gmm_selection.selection(
+                            x_bounds,
+                            x_types,
+                            gmm['clusteringmodel_good'],
+                            gmm['clusteringmodel_bad'],
+                            minimize_starting_points,
+                            minimize_constraints_fun=minimize_constraints_fun)
+                    else:
+                        # If all parameters are of "range_continuous", let's use GMM to generate random starting points
+                        results_exploitation = gmm_selection.selection_r(
+                            x_bounds,
+                            x_types,
+                            gmm['clusteringmodel_good'],
+                            gmm['clusteringmodel_bad'],
+                            num_starting_points=self.selection_num_starting_points,
+                            minimize_constraints_fun=minimize_constraints_fun)

                     if results_exploitation is not None:
                         if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
-                            candidates.append({'hyperparameter': results_exploitation['hyperparameter'],\
-                                               'expected_mu': results_exploitation['expected_mu'],\
-                                               'expected_sigma': results_exploitation['expected_sigma'],\
-                                               'reason': "exploitation_gmm"})
+                            temp_expected_mu, temp_expected_sigma = gp_prediction.predict(results_exploitation['hyperparameter'], gp_model['model'])
+                            temp_candidate = {'hyperparameter': results_exploitation['hyperparameter'],
+                                              'expected_mu': temp_expected_mu,
+                                              'expected_sigma': temp_expected_sigma,
+                                              'reason': "exploitation_gmm"}
+                            candidates.append(temp_candidate)
+                            logger.info("DEBUG: 1 exploitation_gmm candidate selected\n")
+                            logger.info(temp_candidate)
+                        else:
+                            logger.info("DEBUG: No suitable exploitation_gmm candidates were found\n")

@@ -338,11 +352,13 @@ class MetisTuner(Tuner):
             if results_outliers is not None:
                 for results_outlier in results_outliers:
                     if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
-                        candidates.append({'hyperparameter': samples_x[results_outlier['samples_idx']],\
-                                           'expected_mu': results_outlier['expected_mu'],\
-                                           'expected_sigma': results_outlier['expected_sigma'],\
-                                           'reason': "resampling"})
+                        temp_candidate = {'hyperparameter': samples_x[results_outlier['samples_idx']],
+                                          'expected_mu': results_outlier['expected_mu'],
+                                          'expected_sigma': results_outlier['expected_sigma'],
+                                          'reason': "resampling"}
+                        candidates.append(temp_candidate)
+                        logger.info("DEBUG: %d re-sampling candidates selected\n")
+                        logger.info(temp_candidate)
+            else:
+                logger.info("DEBUG: No suitable resampling candidates were found\n")
src/sdk/pynni/nni/nas_utils.py (new file)

# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================

from . import trial


def classic_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size):
    '''Execute the chosen function and inputs directly.
    In this mode, the trial code is only running the chosen subgraph (i.e., the chosen ops and inputs),
    without touching the full model graph.'''
    if trial._params is None:
        trial.get_next_parameter()
    mutable_block = trial.get_current_parameter(mutable_id)
    chosen_layer = mutable_block[mutable_layer_id]["chosen_layer"]
    chosen_inputs = mutable_block[mutable_layer_id]["chosen_inputs"]
    real_chosen_inputs = [optional_inputs[input_name] for input_name in chosen_inputs]
    layer_out = funcs[chosen_layer]([fixed_inputs, real_chosen_inputs], **funcs_args[chosen_layer])

    return layer_out


def enas_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''For enas mode, we build the full model graph in trial but only run a subgraph.
    This is implemented by masking inputs and branching ops.
    Specifically, based on the received subgraph (through nni.get_next_parameter),
    it can be known which inputs should be masked and which op should be executed.'''
    name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
    # store namespace
    if 'name_space' not in globals():
        global name_space
        name_space = dict()
    name_space[mutable_id] = True
    name_space[name_prefix] = dict()
    name_space[name_prefix]['funcs'] = list(funcs)
    name_space[name_prefix]['optional_inputs'] = list(optional_inputs)
    # create tensorflow variables as 1/0 signals used to form subgraph
    if 'tf_variables' not in globals():
        global tf_variables
        tf_variables = dict()
    name_for_optional_inputs = name_prefix + '_optional_inputs'
    name_for_funcs = name_prefix + '_funcs'
    tf_variables[name_prefix] = dict()
    tf_variables[name_prefix]['optional_inputs'] = tf.get_variable(
        name_for_optional_inputs,
        [len(optional_inputs)],
        dtype=tf.bool,
        trainable=False)
    tf_variables[name_prefix]['funcs'] = tf.get_variable(
        name_for_funcs, [], dtype=tf.int64, trainable=False)

    # get real values using their variable names
    real_optional_inputs_value = [optional_inputs[name]
                                  for name in name_space[name_prefix]['optional_inputs']]
    real_func_value = [funcs[name]
                       for name in name_space[name_prefix]['funcs']]
    real_funcs_args = [funcs_args[name]
                       for name in name_space[name_prefix]['funcs']]
    # build tensorflow graph of getting chosen inputs by masking
    real_chosen_inputs = tf.boolean_mask(
        real_optional_inputs_value, tf_variables[name_prefix]['optional_inputs'])
    # build tensorflow graph of different branches by using tf.case
    branches = dict()
    for func_id in range(len(funcs)):
        func_output = real_func_value[func_id]([fixed_inputs, real_chosen_inputs], **real_funcs_args[func_id])
        branches[tf.equal(tf_variables[name_prefix]['funcs'], func_id)] = lambda: func_output
    layer_out = tf.case(branches, exclusive=True, default=lambda: func_output)

    return layer_out


def oneshot_mode(
        mutable_id,
        mutable_layer_id,
        funcs,
        funcs_args,
        fixed_inputs,
        optional_inputs,
        optional_input_size,
        tf):
    '''Similar to enas mode, oneshot mode also builds the full model graph.
    The difference is that oneshot mode does not receive a subgraph.
    Instead, it uses dropout to randomly drop out inputs and ops.'''
    # NNI requires to get_next_parameter before reporting a result. But the parameter will not be used in this mode
    if trial._params is None:
        trial.get_next_parameter()
    optional_inputs = list(optional_inputs.values())
    inputs_num = len(optional_inputs)
    # Calculate dropout rate according to the formula r^(1/k), where r is a hyper-parameter and k is the number of inputs
    if inputs_num > 0:
        rate = 0.01 ** (1 / inputs_num)
        noise_shape = [inputs_num] + [1] * len(optional_inputs[0].get_shape())
        optional_inputs = tf.nn.dropout(optional_inputs, rate=rate, noise_shape=noise_shape)
        optional_inputs = [optional_inputs[idx] for idx in range(inputs_num)]
    layer_outs = [func([fixed_inputs, optional_inputs], **funcs_args[func_name])
                  for func_name, func in funcs.items()]
    layer_out = tf.add_n(layer_outs)

    return layer_out


def reload_tensorflow_variables(session, tf=None):
    '''In enas mode, this function reloads every signal variable created in the `enas_mode` function so
    the whole tensorflow graph will be changed into a certain subgraph received from the Tuner.
    ---------------
    session: the tensorflow session created by users
    tf: tensorflow module
    '''
    subgraph_from_tuner = trial.get_next_parameter()
    for mutable_id, mutable_block in subgraph_from_tuner.items():
        if mutable_id not in name_space:
            continue
        for mutable_layer_id, mutable_layer in mutable_block.items():
            name_prefix = "{}_{}".format(mutable_id, mutable_layer_id)
            # extract layer information from the subgraph sampled by tuner
            chosen_layer = name_space[name_prefix]['funcs'].index(mutable_layer["chosen_layer"])
            chosen_inputs = [1 if inp in mutable_layer["chosen_inputs"] else 0
                             for inp in name_space[name_prefix]['optional_inputs']]
            # load this information into pre-defined tensorflow variables
            tf_variables[name_prefix]['funcs'].load(chosen_layer, session)
            tf_variables[name_prefix]['optional_inputs'].load(chosen_inputs, session)
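A note on the dropout-rate formula in oneshot_mode: with rate = r^(1/k), each of the k optional inputs is dropped independently with probability rate, so the probability that all k are dropped at once stays fixed at rate^k = r (here r = 0.01) no matter how many inputs a layer has. A quick standalone check of that arithmetic:

# rate ** k stays at r = 0.01 for any number of optional inputs k
for k in (1, 2, 4, 8):
    rate = 0.01 ** (1 / k)
    print(k, round(rate, 3), round(rate ** k, 3))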