OpenDAS / nni · Commits · 22ee2ac4

Unverified commit 22ee2ac4, authored Mar 18, 2022 by liuzhe-lz, committed by GitHub on Mar 18, 2022.

HPO doc update (#4634)

Parent: 899a7959
Showing 5 changed files with 87 additions and 106 deletions.
nni/algorithms/hpo/gridsearch_tuner.py     +23 −0
nni/algorithms/hpo/hyperopt_tuner.py       +26 −57
nni/algorithms/hpo/medianstop_assessor.py  +24 −34
nni/algorithms/hpo/tpe_tuner.py            +11 −9
setup_ts.py                                +3 −6
nni/algorithms/hpo/gridsearch_tuner.py
@@ -63,6 +63,29 @@ _logger = logging.getLogger('nni.tuner.gridsearch')
 ##
 class GridSearchTuner(Tuner):
+    """
+    The original grid search approach performs an exhaustive search through a space consisting of ``choice`` and ``randint`` parameters.
+
+    This implementation extends grid search to support all NNI search spaces.
+
+    When the search space contains continuous parameters like ``normal`` and ``loguniform``,
+    the grid search tuner works in the following steps:
+
+    1. Divide the search space into a grid.
+    2. Perform an exhaustive search through the grid.
+    3. Subdivide the grid into a finer-grained one.
+    4. Go to step 2, until the experiment ends.
+
+    As a deterministic algorithm, grid search takes no arguments.
+
+    Examples
+    --------
+
+    .. code-block::
+
+        config.tuner.name = 'GridSearch'
+    """
+
     def __init__(self):
         self.space = None
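The iterative grid refinement described in steps 1–4 of the new docstring can be illustrated with a small standalone sketch. This is illustrative only (not NNI's GridSearchTuner), assuming a single continuous parameter on [0, 1] and a simple doubling granularity schedule; the function name is made up for the example.

    # Illustrative only: iterative grid refinement over one uniform(0, 1) parameter,
    # mirroring steps 1-4 of the docstring above. Not NNI's actual implementation.
    def refine_grid(evaluate, low=0.0, high=1.0, rounds=3):
        best_x, best_y = None, float('inf')
        divisions = 2
        for _ in range(rounds):                          # step 4: repeat with a finer grid
            step = (high - low) / divisions              # step 1: divide the range into a grid
            candidates = [low + i * step for i in range(divisions + 1)]
            for x in candidates:                         # step 2: exhaustive search through the grid
                y = evaluate(x)
                if y < best_y:
                    best_x, best_y = x, y
            divisions *= 2                               # step 3: subdivide into a finer-grained grid
        return best_x, best_y

    # Toy usage: minimize (x - 0.3)^2 on [0, 1].
    print(refine_grid(lambda x: (x - 0.3) ** 2))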
nni/algorithms/hpo/hyperopt_tuner.py
@@ -191,23 +191,31 @@ class HyperoptClassArgsValidator(ClassArgsValidator):

 class HyperoptTuner(Tuner):
     """
-    HyperoptTuner is a tuner which using hyperopt algorithm.
+    NNI wraps `hyperopt <https://github.com/hyperopt/hyperopt>`__ to provide the anneal tuner.
+
+    This simple annealing algorithm begins by sampling from the prior,
+    but tends over time to sample from points closer and closer to the best ones observed.
+
+    This algorithm is a simple variation of random search that leverages smoothness in the response surface.
+    The annealing rate is not adaptive.
+
+    Examples
+    --------
+
+    .. code-block::
+
+        config.tuner.name = 'Anneal'
+        config.tuner.class_args = {
+            'optimize_mode': 'minimize'
+        }
+
+    Parameters
+    ----------
+    optimize_mode: 'minimize' or 'maximize'
+        Whether to minimize or maximize the trial result.
     """

     def __init__(self, algorithm_name, optimize_mode='minimize', parallel_optimize=False, constant_liar_type='min'):
-        """
-        Parameters
-        ----------
-        algorithm_name : str
-            algorithm_name includes "tpe", "random_search" and anneal".
-        optimize_mode : str
-        parallel_optimize : bool
-            More detail could reference: docs/en_US/Tuner/HyperoptTuner.md
-        constant_liar_type : str
-            constant_liar_type including "min", "max" and "mean"
-            More detail could reference: docs/en_US/Tuner/HyperoptTuner.md
-        """
         self.algorithm_name = algorithm_name
         self.optimize_mode = OptimizeMode(optimize_mode)
         self.json = None
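The annealing behaviour described in the new docstring (start from the prior, then sample ever closer to the best observed point, with a fixed annealing rate) can be sketched in a few lines. This is illustrative only, assuming a single uniform(0, 1) parameter to minimize; it is not hyperopt's implementation, and the shrink factor and function name are made up for the example.

    import random

    # Illustrative only: sample from the prior at first, then increasingly near the best
    # observed point; the shrink factor plays the role of the non-adaptive annealing rate.
    def anneal_search(evaluate, n_trials=50, shrink=0.9):
        best_x, best_y = None, float('inf')
        radius = 1.0
        for _ in range(n_trials):
            if best_x is None or random.random() < radius:
                x = random.uniform(0.0, 1.0)                            # sample from the prior
            else:
                x = min(1.0, max(0.0, random.gauss(best_x, radius)))    # sample near the best point
            y = evaluate(x)
            if y < best_y:
                best_x, best_y = x, y
            radius *= shrink                                            # fixed annealing rate
        return best_x, best_y

    # Toy usage: minimize (x - 0.7)^2 on [0, 1].
    print(anneal_search(lambda x: (x - 0.7) ** 2))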
@@ -238,15 +246,6 @@ class HyperoptTuner(Tuner):
             raise RuntimeError('Not support tuner algorithm in hyperopt.')

     def update_search_space(self, search_space):
-        """
-        Update search space definition in tuner by search_space in parameters.
-
-        Will called when first setup experiemnt or update search space in WebUI.
-
-        Parameters
-        ----------
-        search_space : dict
-        """
         validate_search_space(search_space)
         self.json = search_space
@@ -266,22 +265,11 @@ class HyperoptTuner(Tuner):
         self.rval.catch_eval_exceptions = False

     def generate_parameters(self, parameter_id, **kwargs):
-        """
-        Returns a set of trial (hyper-)parameters, as a serializable object.
-
-        Parameters
-        ----------
-        parameter_id : int
-
-        Returns
-        -------
-        params : dict
-        """
-        total_params = self.get_suggestion(random_search=False)
+        total_params = self._get_suggestion(random_search=False)
         # avoid generating same parameter with concurrent trials because hyperopt doesn't support parallel mode
         if total_params in self.total_data.values():
             # but it can cause duplicate parameter rarely
-            total_params = self.get_suggestion(random_search=True)
+            total_params = self._get_suggestion(random_search=True)
         self.total_data[parameter_id] = total_params
         if self.parallel:
@@ -291,17 +279,6 @@ class HyperoptTuner(Tuner):
         return params

     def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
-        """
-        Record an observation of the objective function
-
-        Parameters
-        ----------
-        parameter_id : int
-        parameters : dict
-        value : dict/float
-            if value is dict, it should have "default" key.
-            value is final metrics of the trial.
-        """
         reward = extract_scalar_reward(value)
         # restore the paramsters contains '_index'
         if parameter_id not in self.total_data:
@@ -369,7 +346,7 @@ class HyperoptTuner(Tuner):
                 idxs[key] = [new_id]
                 vals[key] = [vals[key]]

-        self.miscs_update_idxs_vals(rval_miscs,
+        self._miscs_update_idxs_vals(rval_miscs,
                                     idxs,
                                     vals,
                                     idxs_map={new_id: new_id},
@@ -382,7 +359,7 @@ class HyperoptTuner(Tuner):
         trials.insert_trial_docs([trial])
         trials.refresh()

-    def miscs_update_idxs_vals(self, miscs,
+    def _miscs_update_idxs_vals(self, miscs,
                                idxs,
                                vals,
@@ -416,7 +393,7 @@ class HyperoptTuner(Tuner):
             misc_by_id[tid]['idxs'][key] = [tid]
             misc_by_id[tid]['vals'][key] = [val]

-    def get_suggestion(self, random_search=False):
+    def _get_suggestion(self, random_search=False):
         """
         get suggestion from hyperopt
@@ -469,14 +446,6 @@ class HyperoptTuner(Tuner):
         return total_params

     def import_data(self, data):
-        """
-        Import additional data for tuning
-
-        Parameters
-        ----------
-        data:
-            a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'
-        """
         _completed_num = 0
         for trial_info in data:
             logger.info("Importing data, current processing progress %s / %s", _completed_num, len(data))
nni/algorithms/hpo/medianstop_assessor.py
 # Copyright (c) Microsoft Corporation.
 # Licensed under the MIT license.

+from __future__ import annotations
+
 import logging
 from schema import Schema, Optional

 from nni import ClassArgsValidator
 from nni.assessor import Assessor, AssessResult
+from nni.typehint import Literal
 from nni.utils import extract_scalar_history

 logger = logging.getLogger('medianstop_Assessor')
@@ -18,18 +21,33 @@ class MedianstopClassArgsValidator(ClassArgsValidator):
         }).validate(kwargs)

 class MedianstopAssessor(Assessor):
-    """MedianstopAssessor is The median stopping rule stops a pending trial X at step S
+    """
+    The median stopping rule stops a pending trial X at step S
     if the trial’s best objective value by step S is strictly worse than the median value
     of the running averages of all completed trials’ objectives reported up to step S
+
+    The algorithm is described in *Google Vizier: A Service for Black-Box Optimization*.
+    (`paper <https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf>`__)
+
+    Examples
+    --------
+
+    .. code-block::
+
+        config.assessor.name = 'Medianstop'
+        config.assessor.class_args = {
+            'optimize_mode': 'maximize'
+        }

     Parameters
     ----------
-    optimize_mode : str
-        optimize mode, 'maximize' or 'minimize'
-    start_step : int
-        only after receiving start_step number of reported intermediate results
+    optimize_mode
+        Whether to minimize or maximize the trial result.
+    start_step
+        Only after receiving ``start_step`` reported intermediate results will the assessor begin to assess a trial.
     """

-    def __init__(self, optimize_mode='maximize', start_step=0):
+    def __init__(self, optimize_mode: Literal['minimize', 'maximize'] = 'maximize', start_step: int = 0):
         self._start_step = start_step
         self._running_history = dict()
         self._completed_avg_history = dict()
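The median stopping rule described in the new docstring can be sketched as a standalone check. The names, the maximize-mode assumption, and the toy data below are illustrative; this is not the assessor's actual code.

    import statistics

    # Illustrative only: stop a running trial at step S if its best objective so far is
    # strictly worse than the median of completed trials' running averages up to step S.
    def should_stop(trial_history, completed_histories, start_step=0):
        s = len(trial_history)
        truncated = [h[:s] for h in completed_histories if h[:s]]
        if s < start_step or not truncated:
            return False                                   # not enough information yet
        best_so_far = max(trial_history)                   # higher is better (maximize mode)
        running_avgs = [sum(h) / len(h) for h in truncated]
        return best_so_far < statistics.median(running_avgs)

    completed = [[0.5, 0.6, 0.7], [0.4, 0.5, 0.6]]
    print(should_stop([0.1, 0.2], completed))              # True: 0.2 is below the median of 0.55 and 0.45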
@@ -56,15 +74,6 @@ class MedianstopAssessor(Assessor):
             self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])

     def trial_end(self, trial_job_id, success):
-        """trial_end
-
-        Parameters
-        ----------
-        trial_job_id : int
-            trial job id
-        success : bool
-            True if succssfully finish the experiment, False otherwise
-        """
         if trial_job_id in self._running_history:
             if success:
                 cnt = 0
@@ -79,25 +88,6 @@ class MedianstopAssessor(Assessor):
             logger.warning('trial_end: trial_job_id does not exist in running_history')

     def assess_trial(self, trial_job_id, trial_history):
-        """assess_trial
-
-        Parameters
-        ----------
-        trial_job_id : int
-            trial job id
-        trial_history : list
-            The history performance matrix of each trial
-
-        Returns
-        -------
-        bool
-            AssessResult.Good or AssessResult.Bad
-
-        Raises
-        ------
-        Exception
-            unrecognize exception in medianstop_assessor
-        """
         curr_step = len(trial_history)
         if curr_step < self._start_step:
             return AssessResult.Good
nni/algorithms/hpo/tpe_tuner.py
@@ -2,9 +2,10 @@
 # Licensed under the MIT license.

 """
-Tree-structured Parzen Estimator (TPE) tuner for hyper-parameter optimization.
+Tree-structured Parzen Estimator (TPE) tuner.

 Paper: https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf
 Official code: https://github.com/hyperopt/hyperopt/blob/master/hyperopt/tpe.py

 This is a slightly modified re-implementation of the algorithm.
@@ -34,8 +35,9 @@ _logger = logging.getLogger('nni.tuner.tpe')

 class TpeArguments(NamedTuple):
     """
-    These are the hyper-parameters of TPE algorithm itself.
-    To avoid confusing with trials' hyper-parameters, they are called "arguments" in TPE source code.
+    Hyperparameters of the TPE algorithm itself.
+    To avoid confusion with the trials' hyperparameters to be tuned, these are called "arguments" here.

     Parameters
     ----------
@@ -83,15 +85,15 @@ class TpeArguments(NamedTuple):

 class TpeTuner(Tuner):
     """
-    Tree-structured Parzen Estimator (TPE) is an SMBO tuner.
+    Tree-structured Parzen Estimator (TPE) tuner.

-    TPE models P(x|y) and P(y) where x represents hyperparameters and y the associated evaluation metric.
+    TPE is an SMBO algorithm.
+    It models P(x|y) and P(y) where x represents hyperparameters and y the evaluation result.
     P(x|y) is modeled by transforming the generative process of hyperparameters,
     replacing the distributions of the configuration prior with non-parametric densities.

-    TPE is described in detail in *Algorithms for Hyper-Parameter Optimization*. (`paper`_)
-
-    .. _paper: https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf
+    Paper: :footcite:`bergstra2011algorithms`.
+    (`PDF <https://proceedings.neurips.cc/paper/2011/file/86e8f7ab32cfd12577bc2619bc635690-Paper.pdf>`__)

     Examples
     --------
@@ -102,7 +104,7 @@ class TpeTuner(Tuner):

         config.tuner.name = 'TPE'
         config.tuner.class_args = {
-            'optimize_mode': 'minimize'
+            'optimize_mode': 'maximize'
         }

     .. code-block::
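For intuition about the P(x|y) modelling mentioned in the TpeTuner docstring above: TPE splits observed trials into a "good" and a "bad" group and prefers candidates where the good-group density is high relative to the bad-group density. The sketch below is illustrative only, using crude normal densities for a single parameter rather than NNI's actual Parzen estimators; the function name, gamma value, and toy history are made up for the example.

    import math
    import random
    import statistics

    # Illustrative only: pick the candidate with the best good-density / bad-density ratio.
    def tpe_suggest(observations, gamma=0.25, n_candidates=24):
        # observations: list of (x, y) pairs, where lower y is better.
        ordered = sorted(observations, key=lambda o: o[1])
        n_good = max(1, int(gamma * len(ordered)))
        good = [x for x, _ in ordered[:n_good]]
        bad = [x for x, _ in ordered[n_good:]] or good

        def density(x, points):                            # crude stand-in for a Parzen estimator
            mu = statistics.mean(points)
            sigma = statistics.pstdev(points) or 1.0
            return math.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) / sigma

        candidates = [random.gauss(random.choice(good), 0.1) for _ in range(n_candidates)]
        return max(candidates, key=lambda x: density(x, good) / density(x, bad))

    # Toy usage: history of (x, loss) pairs for loss = (x - 0.3)^2.
    history = [(x / 10, (x / 10 - 0.3) ** 2) for x in range(10)]
    print(tpe_suggest(history))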
setup_ts.py
@@ -225,12 +225,9 @@ def copy_nni_node(version):
     """
     _print('Copying files')

-    # copytree(..., dirs_exist_ok=True) is not supported by Python 3.6
-    for path in Path('ts/nni_manager/dist').iterdir():
-        if path.is_dir():
-            shutil.copytree(path, Path('nni_node', path.name))
-        elif path.name != 'nni_manager.tsbuildinfo':
-            shutil.copyfile(path, Path('nni_node', path.name))
+    shutil.copytree('ts/nni_manager/dist', 'nni_node', dirs_exist_ok=True)
     shutil.copyfile('ts/nni_manager/yarn.lock', 'nni_node/yarn.lock')
+    Path('nni_node/nni_manager.tsbuildinfo').unlink()

     package_json = json.load(open('ts/nni_manager/package.json'))
     if version:
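The simplification works because ``shutil.copytree`` accepts ``dirs_exist_ok=True`` since Python 3.8, merging into an existing destination instead of raising. A tiny throwaway demonstration (the paths are made up for the example):

    import shutil
    from pathlib import Path

    # Throwaway demo: copytree merges into an existing directory when dirs_exist_ok=True.
    src, dst = Path('demo_src'), Path('demo_dst')
    (src / 'sub').mkdir(parents=True, exist_ok=True)
    (src / 'sub' / 'a.txt').write_text('hello')
    dst.mkdir(exist_ok=True)
    (dst / 'existing.txt').write_text('already here')

    # Without dirs_exist_ok=True this call would raise FileExistsError because dst exists.
    shutil.copytree(src, dst, dirs_exist_ok=True)
    print(sorted(p.name for p in dst.rglob('*')))   # ['a.txt', 'existing.txt', 'sub']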