Commit 594924a9, authored Nov 18, 2019 by quzha
Merge branch 'master' of github.com:Microsoft/nni into dev-nas-refactor
Parents: d43fbe82, 262fabf1
Showing 17 changed files with 521 additions and 278 deletions (+521, -278).
src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py   +28  -28
src/sdk/pynni/nni/ppo_tuner/__init__.py                        +1   -0
src/sdk/pynni/nni/ppo_tuner/distri.py                          +3   -3
src/sdk/pynni/nni/ppo_tuner/model.py                           +13  -12
src/sdk/pynni/nni/ppo_tuner/policy.py                          +43  -17
src/sdk/pynni/nni/ppo_tuner/ppo_tuner.py                       +139 -58
src/sdk/pynni/nni/ppo_tuner/util.py                            +23  -14
src/sdk/pynni/nni/smac_tuner/convert_ss_to_scenario.py         +12  -3
src/sdk/pynni/nni/smac_tuner/smac_tuner.py                     +70  -39
src/sdk/pynni/nni/trial.py                                     +62  -6
src/sdk/pynni/nni/tuner.py                                     +4   -2
src/sdk/pynni/tests/test_compressor.py                         +50  -83
test/pipelines-it-installation.yml                             +45  -0
tools/nni_cmd/config_utils.py                                  +2   -1
tools/nni_cmd/constants.py                                     +1   -1
tools/nni_cmd/launcher.py                                      +4   -3
tools/nni_cmd/nnictl_utils.py                                  +21  -8
src/sdk/pynni/nni/medianstop_assessor/medianstop_assessor.py

@@ -27,21 +27,21 @@ class MedianstopAssessor(Assessor):
     Parameters
     ----------
-    optimize_mode: str
+    optimize_mode : str
         optimize mode, 'maximize' or 'minimize'
-    start_step: int
+    start_step : int
         only after receiving start_step number of reported intermediate results
     """
     def __init__(self, optimize_mode='maximize', start_step=0):
-        self.start_step = start_step
-        self.running_history = dict()
-        self.completed_avg_history = dict()
+        self._start_step = start_step
+        self._running_history = dict()
+        self._completed_avg_history = dict()
         if optimize_mode == 'maximize':
-            self.high_better = True
+            self._high_better = True
         elif optimize_mode == 'minimize':
-            self.high_better = False
+            self._high_better = False
         else:
-            self.high_better = True
+            self._high_better = True
             logger.warning('unrecognized optimize_mode %s', optimize_mode)

     def _update_data(self, trial_job_id, trial_history):
@@ -49,35 +49,35 @@ class MedianstopAssessor(Assessor):
         Parameters
         ----------
-        trial_job_id: int
+        trial_job_id : int
             trial job id
-        trial_history: list
+        trial_history : list
             The history performance matrix of each trial
         """
-        if trial_job_id not in self.running_history:
-            self.running_history[trial_job_id] = []
-        self.running_history[trial_job_id].extend(trial_history[len(self.running_history[trial_job_id]):])
+        if trial_job_id not in self._running_history:
+            self._running_history[trial_job_id] = []
+        self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])

     def trial_end(self, trial_job_id, success):
         """trial_end
         Parameters
         ----------
-        trial_job_id: int
+        trial_job_id : int
             trial job id
-        success: bool
+        success : bool
             True if succssfully finish the experiment, False otherwise
         """
-        if trial_job_id in self.running_history:
+        if trial_job_id in self._running_history:
             if success:
                 cnt = 0
                 history_sum = 0
-                self.completed_avg_history[trial_job_id] = []
-                for each in self.running_history[trial_job_id]:
+                self._completed_avg_history[trial_job_id] = []
+                for each in self._running_history[trial_job_id]:
                     cnt += 1
                     history_sum += each
-                    self.completed_avg_history[trial_job_id].append(history_sum / cnt)
-            self.running_history.pop(trial_job_id)
+                    self._completed_avg_history[trial_job_id].append(history_sum / cnt)
+            self._running_history.pop(trial_job_id)
         else:
             logger.warning('trial_end: trial_job_id does not exist in running_history')
@@ -86,9 +86,9 @@ class MedianstopAssessor(Assessor):
         Parameters
         ----------
-        trial_job_id: int
+        trial_job_id : int
             trial job id
-        trial_history: list
+        trial_history : list
             The history performance matrix of each trial
         Returns
@@ -102,7 +102,7 @@ class MedianstopAssessor(Assessor):
             unrecognize exception in medianstop_assessor
         """
         curr_step = len(trial_history)
-        if curr_step < self.start_step:
+        if curr_step < self._start_step:
             return AssessResult.Good
         try:
@@ -115,18 +115,18 @@ class MedianstopAssessor(Assessor):
             logger.exception(error)
         self._update_data(trial_job_id, num_trial_history)
-        if self.high_better:
+        if self._high_better:
             best_history = max(trial_history)
         else:
             best_history = min(trial_history)
         avg_array = []
-        for id_ in self.completed_avg_history:
-            if len(self.completed_avg_history[id_]) >= curr_step:
-                avg_array.append(self.completed_avg_history[id_][curr_step - 1])
+        for id_ in self._completed_avg_history:
+            if len(self._completed_avg_history[id_]) >= curr_step:
+                avg_array.append(self._completed_avg_history[id_][curr_step - 1])
         if avg_array:
             avg_array.sort()
-            if self.high_better:
+            if self._high_better:
                 median = avg_array[(len(avg_array) - 1) // 2]
                 return AssessResult.Bad if best_history < median else AssessResult.Good
             else:
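The rename above is mechanical (public attributes become underscore-prefixed), so the median-stop rule itself is unchanged: a running trial is judged against the median of the completed trials' running averages at the same step. A standalone sketch of that rule with made-up numbers (not taken from the commit):

    # Median-stop decision in isolation; the data below is invented for illustration.
    completed_avg_history = {
        'trial_a': [0.50, 0.55, 0.60],   # running averages of finished trials
        'trial_b': [0.40, 0.42, 0.45],
    }
    trial_history = [0.30, 0.35, 0.41]   # intermediate results of the running trial
    curr_step = len(trial_history)
    best_history = max(trial_history)    # 'maximize' mode

    avg_array = sorted(
        hist[curr_step - 1]
        for hist in completed_avg_history.values()
        if len(hist) >= curr_step
    )
    if avg_array:
        median = avg_array[(len(avg_array) - 1) // 2]
        decision = 'Bad' if best_history < median else 'Good'   # 'Bad' means early-stop
    else:
        decision = 'Good'
    print(decision)   # -> 'Bad' for these numbers, since 0.41 < 0.45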
src/sdk/pynni/nni/ppo_tuner/__init__.py

+from .ppo_tuner import PPOTuner
src/sdk/pynni/nni/ppo_tuner/distri.py

@@ -77,7 +77,7 @@ class PdType:
 class CategoricalPd(Pd):
     """
-    categorical prossibility distribution
+    Categorical prossibility distribution
     """
     def __init__(self, logits, mask_npinf, nsteps, size, is_act_model):
         self.logits = logits
@@ -154,7 +154,7 @@ class CategoricalPd(Pd):
 class CategoricalPdType(PdType):
     """
-    to create CategoricalPd
+    To create CategoricalPd
     """
     def __init__(self, ncat, nsteps, np_mask, is_act_model):
         self.ncat = ncat
@@ -180,7 +180,7 @@ class CategoricalPdType(PdType):
 def _matching_fc(tensor, name, size, nsteps, init_scale, init_bias, np_mask, is_act_model):
     """
-    add fc op, and add mask op when not in action mode
+    Add fc op, and add mask op when not in action mode
     """
     if tensor.shape[-1] == size:
         assert False
src/sdk/pynni/nni/ppo_tuner/model.py

@@ -28,21 +28,18 @@ from .util import initialize, get_session
 class Model:
     """
     We use this object to :
     __init__:
     - Creates the step_model
     - Creates the train_model
     train():
     - Make the training part (feedforward and retropropagation of gradients)
     save/load():
     - Save load the model
     """
     def __init__(self, *, policy, nbatch_act, nbatch_train,
                  nsteps, ent_coef, vf_coef, max_grad_norm, microbatch_size=None, np_mask=None):
-        """
-        init
-        """
         self.sess = sess = get_session()

         with tf.variable_scope('ppo2_model', reuse=tf.AUTO_REUSE):
@@ -137,9 +134,13 @@ class Model:
     def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
         """
-        train the model.
+        Train the model.
         Here we calculate advantage A(s,a) = R + yV(s') - V(s)
-        Returns = R + yV(s')
+
+        Returns
+        -------
+        obj
+            = R + yV(s')
         """
         advs = returns - values
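The docstring above spells out the advantage estimate used by the trainer, A(s,a) = R + yV(s') - V(s), which the code realizes as advs = returns - values. A toy numeric check (arrays invented for illustration; the normalization line is a common PPO practice, not necessarily verbatim from this file):

    import numpy as np

    returns = np.array([1.0, 0.5, 0.2], dtype=np.float32)   # roughly R + y*V(s')
    values = np.array([0.8, 0.6, 0.1], dtype=np.float32)    # V(s) from the value head

    advs = returns - values                                  # A(s, a) per step
    advs = (advs - advs.mean()) / (advs.std() + 1e-8)        # normalize before the update
    print(advs)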
src/sdk/pynni/nni/ppo_tuner/policy.py

@@ -34,14 +34,20 @@ class PolicyWithValue:
     def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, np_mask=None, is_act_model=False, **tensors):
         """
-        Parameters:
+        Parameters
         ----------
-        env: RL environment
-        observations: tensorflow placeholder in which the observations will be fed
-        latent: latent state from which policy distribution parameters should be inferred
-        vf_latent: latent state from which value function should be inferred (if None, then latent is used)
-        sess: tensorflow session to run calculations in (if None, default session is used)
-        **tensors: tensorflow tensors for additional attributes such as state or mask
+        env : obj
+            RL environment
+        observations : tensorflow placeholder
+            Tensorflow placeholder in which the observations will be fed
+        latent : tensor
+            Latent state from which policy distribution parameters should be inferred
+        vf_latent : tensor
+            Latent state from which value function should be inferred (if None, then latent is used)
+        sess : tensorflow session
+            Tensorflow session to run calculations in (if None, default session is used)
+        **tensors
+            Tensorflow tensors for additional attributes such as state or mask
         """
         self.X = observations
@@ -138,12 +144,14 @@ class PolicyWithValue:
         """
         Compute next action(s) given the observation(s)
-        Parameters:
+
+        Parameters
         ----------
-        observation: observation data (either single or a batch)
-        **extra_feed: additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
-        Returns:
+        observation : np array
+            Observation data (either single or a batch)
+        **extra_feed
+            Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
+
+        Returns
         -------
         (action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
         """
@@ -157,22 +165,40 @@ class PolicyWithValue:
         """
         Compute value estimate(s) given the observation(s)
-        Parameters:
+
+        Parameters
         ----------
-        observation: observation data (either single or a batch)
-        **extra_feed: additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
-        Returns:
+        observation : np array
+            Observation data (either single or a batch)
+        **extra_feed
+            Additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
+
+        Returns
         -------
-        value estimate
+        Value estimate
         """
         return self._evaluate(self.vf, ob, *args, **kwargs)

 def build_lstm_policy(model_config, value_network=None, estimate_q=False, **policy_kwargs):
     """
-    build lstm policy and value network, they share the same lstm network.
+    Build lstm policy and value network, they share the same lstm network.
     the parameters all use their default values.
+
+    Parameter
+    ---------
+    model_config : obj
+        Configurations of the model
+    value_network : obj
+        The network for value function
+    estimate_q : bool
+        Whether to estimate ``q``
+    **policy_kwargs
+        The kwargs for policy network, i.e., lstm model
+
+    Returns
+    -------
+    func
+        The policy network
     """
     policy_network = lstm_model(**policy_kwargs)
src/sdk/pynni/nni/ppo_tuner/ppo_tuner.py

@@ -38,8 +38,10 @@ from .policy import build_lstm_policy
 logger = logging.getLogger('ppo_tuner_AutoML')

-def constfn(val):
-    """wrap as function"""
+def _constfn(val):
+    """
+    Wrap as function
+    """
     def f(_):
         return val
     return f
@@ -90,7 +92,7 @@ class TrialsInfo:
     def get_next(self):
         """
-        get actions of the next trial
+        Get actions of the next trial
         """
         if self.iter >= self.inf_batch_size:
             return None, None
@@ -102,14 +104,14 @@ class TrialsInfo:
     def update_rewards(self, rewards, returns):
         """
-        after the trial is finished, reward and return of this trial is updated
+        After the trial is finished, reward and return of this trial is updated
         """
         self.rewards = rewards
         self.returns = returns

     def convert_shape(self):
         """
-        convert shape
+        Convert shape
         """
         def sf01(arr):
             """
@@ -138,9 +140,9 @@ class PPOModel:
         set_global_seeds(None)
         assert isinstance(self.model_config.lr, float)
-        self.lr = constfn(self.model_config.lr)
+        self.lr = _constfn(self.model_config.lr)
         assert isinstance(self.model_config.cliprange, float)
-        self.cliprange = constfn(self.model_config.cliprange)
+        self.cliprange = _constfn(self.model_config.cliprange)

         # build lstm policy network, value share the same network
         policy = build_lstm_policy(model_config)
@@ -165,12 +167,28 @@ class PPOModel:
     def inference(self, num):
         """
-        generate actions along with related info from policy network.
+        Generate actions along with related info from policy network.
         observation is the action of the last step.
-        Parameters:
+
+        Parameters
         ----------
-        num: the number of trials to generate
+        num: int
+            The number of trials to generate
+
+        Returns
+        -------
+        mb_obs : list
+            Observation of the ``num`` configurations
+        mb_actions : list
+            Actions of the ``num`` configurations
+        mb_values : list
+            Values from the value function of the ``num`` configurations
+        mb_neglogpacs : list
+            ``neglogp`` of the ``num`` configurations
+        mb_dones : list
+            To show whether the play is done, always ``True``
+        last_values : tensorflow tensor
+            The last values of the ``num`` configurations, got with session run
         """
         # Here, we init the lists that will contain the mb of experiences
         mb_obs, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], []
@@ -212,13 +230,15 @@ class PPOModel:
     def compute_rewards(self, trials_info, trials_result):
         """
-        compute the rewards of the trials in trials_info based on trials_result,
+        Compute the rewards of the trials in trials_info based on trials_result,
         and update the rewards in trials_info
-        Parameters:
+
+        Parameters
         ----------
-        trials_info: info of the generated trials
-        trials_result: final results (e.g., acc) of the generated trials
+        trials_info : TrialsInfo
+            Info of the generated trials
+        trials_result : list
+            Final results (e.g., acc) of the generated trials
         """
         mb_rewards = np.asarray([trials_result for _ in trials_info.actions], dtype=np.float32)

         # discount/bootstrap off value fn
@@ -243,12 +263,14 @@ class PPOModel:
     def train(self, trials_info, nenvs):
         """
-        train the policy/value network using trials_info
-        Parameters:
+        Train the policy/value network using trials_info
+
+        Parameters
         ----------
-        trials_info: complete info of the generated trials from the previous inference
-        nenvs: the batch size of the (previous) inference
+        trials_info : TrialsInfo
+            Complete info of the generated trials from the previous inference
+        nenvs : int
+            The batch size of the (previous) inference
         """
         # keep frac decay for future optimization
         if self.cur_update <= self.nupdates:
@@ -282,27 +304,40 @@ class PPOModel:
 class PPOTuner(Tuner):
     """
-    PPOTuner
+    PPOTuner, the implementation inherits the main logic of the implementation
+    [ppo2 from openai](https://github.com/openai/baselines/tree/master/baselines/ppo2), and is adapted for NAS scenario.
+    It uses ``lstm`` for its policy network and value network, policy and value share the same network.
     """

     def __init__(self, optimize_mode, trials_per_update=20, epochs_per_update=4, minibatch_size=4,
                  ent_coef=0.0, lr=3e-4, vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95, cliprange=0.2):
         """
-        initialization, PPO model is not initialized here as search space is not received yet.
-        Parameters:
+        Initialization, PPO model is not initialized here as search space is not received yet.
+
+        Parameters
         ----------
-        optimize_mode: maximize or minimize
-        trials_per_update: number of trials to have for each model update
-        epochs_per_update: number of epochs to run for each model update
-        minibatch_size: minibatch size (number of trials) for the update
-        ent_coef: policy entropy coefficient in the optimization objective
-        lr: learning rate of the model (lstm network), constant
-        vf_coef: value function loss coefficient in the optimization objective
-        max_grad_norm: gradient norm clipping coefficient
-        gamma: discounting factor
-        lam: advantage estimation discounting factor (lambda in the paper)
-        cliprange: cliprange in the PPO algorithm, constant
+        optimize_mode : str
+            maximize or minimize
+        trials_per_update : int
+            Number of trials to have for each model update
+        epochs_per_update : int
+            Number of epochs to run for each model update
+        minibatch_size : int
+            Minibatch size (number of trials) for the update
+        ent_coef : float
+            Policy entropy coefficient in the optimization objective
+        lr : float
+            Learning rate of the model (lstm network), constant
+        vf_coef : float
+            Value function loss coefficient in the optimization objective
+        max_grad_norm : float
+            Gradient norm clipping coefficient
+        gamma : float
+            Discounting factor
+        lam : float
+            Advantage estimation discounting factor (lambda in the paper)
+        cliprange : float
+            Cliprange in the PPO algorithm, constant
         """
         self.optimize_mode = OptimizeMode(optimize_mode)
         self.model_config = ModelConfig()
@@ -330,21 +365,25 @@ class PPOTuner(Tuner):
         self.model_config.nminibatches = minibatch_size

         self.send_trial_callback = None
-        logger.info('=== finished PPOTuner initialization')
+        logger.info('Finished PPOTuner initialization')

     def _process_one_nas_space(self, block_name, block_space):
         """
-        process nas space to determine observation space and action space
-        Parameters:
+        Process nas space to determine observation space and action space
+
+        Parameters
         ----------
-        block_name: the name of the mutable block
-        block_space: search space of this mutable block
-        Returns:
-        ----------
-        actions_spaces: list of the space of each action
-        actions_to_config: the mapping from action to generated configuration
+        block_name : str
+            The name of the mutable block
+        block_space : dict
+            Search space of this mutable block
+
+        Returns
+        -------
+        actions_spaces : list
+            List of the space of each action
+        actions_to_config : list
+            The mapping from action to generated configuration
         """
         actions_spaces = []
         actions_to_config = []
@@ -385,7 +424,7 @@ class PPOTuner(Tuner):
     def _process_nas_space(self, search_space):
         """
-        process nas search space to get action/observation space
+        Process nas search space to get action/observation space
         """
         actions_spaces = []
         actions_to_config = []
@@ -412,7 +451,7 @@ class PPOTuner(Tuner):
     def _generate_action_mask(self):
         """
-        different step could have different action space. to deal with this case, we merge all the
+        Different step could have different action space. to deal with this case, we merge all the
         possible actions into one action space, and use mask to indicate available actions for each step
         """
         two_masks = []
@@ -439,15 +478,13 @@ class PPOTuner(Tuner):
     def update_search_space(self, search_space):
         """
-        get search space, currently the space only includes that for NAS
-        Parameters:
+        Get search space, currently the space only includes that for NAS
+
+        Parameters
         ----------
-        search_space: search space for NAS
-        Returns:
-        -------
-        no return
+        search_space : dict
+            Search space for NAS
+            the format could be referred to search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
         """
         logger.info('=== update search space %s', search_space)
         assert self.search_space is None
@@ -470,7 +507,7 @@ class PPOTuner(Tuner):
     def _actions_to_config(self, actions):
         """
-        given actions, to generate the corresponding trial configuration
+        Given actions, to generate the corresponding trial configuration
         """
         chosen_arch = copy.deepcopy(self.chosen_arch_template)
         for cnt, act in enumerate(actions):
@@ -490,6 +527,19 @@ class PPOTuner(Tuner):
     def generate_multiple_parameters(self, parameter_id_list, **kwargs):
         """
         Returns multiple sets of trial (hyper-)parameters, as iterable of serializable objects.
+
+        Parameters
+        ----------
+        parameter_id_list : list of int
+            Unique identifiers for each set of requested hyper-parameters.
+            These will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Not used
+
+        Returns
+        -------
+        list
+            A list of newly generated configurations
         """
         result = []
         self.send_trial_callback = kwargs['st_callback']
@@ -506,7 +556,17 @@ class PPOTuner(Tuner):
     def generate_parameters(self, parameter_id, **kwargs):
         """
-        generate parameters, if no trial configration for now, self.credit plus 1 to send the config later
+        Generate parameters, if no trial configration for now, self.credit plus 1 to send the config later
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Not used
+
+        Returns
+        -------
+        dict
+            One newly generated configuration
         """
         if self.first_inf:
             self.trials_result = [None for _ in range(self.inf_batch_size)]
@@ -527,6 +587,7 @@ class PPOTuner(Tuner):
     def _next_round_inference(self):
         """
+        Run a inference to generate next batch of configurations
         """
         self.finished_trials = 0
         self.model.compute_rewards(self.trials_info, self.trials_result)
@@ -554,8 +615,17 @@ class PPOTuner(Tuner):
     def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
         """
-        receive trial's result. if the number of finished trials equals self.inf_batch_size, start the next update to
-        train the model
+        Receive trial's result. if the number of finished trials equals self.inf_batch_size, start the next update to
+        train the model.
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
+        parameters : dict
+            Hyper-parameters generated by :meth:`generate_parameters`.
+        value : dict
+            Result from trial (the return value of :func:`nni.report_final_result`).
         """
         trial_info_idx = self.running_trials.pop(parameter_id, None)
         assert trial_info_idx is not None
@@ -572,7 +642,17 @@ class PPOTuner(Tuner):
     def trial_end(self, parameter_id, success, **kwargs):
         """
-        to deal with trial failure
+        To deal with trial failure. If a trial fails, it is popped out from ``self.running_trials``,
+        and the final result of this trial is assigned with the average of the finished trials.
+
+        Parameters
+        ----------
+        parameter_id : int
+            Unique identifier for hyper-parameters used by this trial.
+        success : bool
+            True if the trial successfully completed; False if failed or terminated.
+        **kwargs
+            Not used
         """
         if not success:
             if parameter_id not in self.running_trials:
@@ -582,7 +662,7 @@ class PPOTuner(Tuner):
             assert trial_info_idx is not None
             # use mean of finished trials as the result of this failed trial
             values = [val for val in self.trials_result if val is not None]
-            logger.warning('zql values: %s', values)
+            logger.warning('In trial_end, values: %s', values)
             self.trials_result[trial_info_idx] = (sum(values) / len(values)) if values else 0
             self.finished_trials += 1
             if self.finished_trials == self.inf_batch_size:
@@ -590,10 +670,11 @@ class PPOTuner(Tuner):
     def import_data(self, data):
         """
-        Import additional data for tuning
+        Import additional data for tuning, not supported yet.
+
         Parameters
         ----------
-        data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'
+        data : list
+            A list of dictionarys, each of which has at least two keys, ``parameter`` and ``value``
         """
         logger.warning('PPOTuner cannot leverage imported data.')
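For orientation, a minimal sketch of constructing this tuner; the arguments mirror the __init__ signature documented above, while the surrounding setup is only indicated in comments (in a real experiment NNI builds the tuner itself from config.yml, e.g. builtinTunerName: PPOTuner):

    # Standalone construction for illustration only.
    from nni.ppo_tuner import PPOTuner

    tuner = PPOTuner(
        optimize_mode='maximize',
        trials_per_update=20,   # trials collected before each policy update
        epochs_per_update=4,    # PPO epochs per update
        minibatch_size=4,
        lr=3e-4,                # constant learning rate, wrapped by _constfn
        gamma=0.99,             # discount factor
        lam=0.95,               # GAE lambda
        cliprange=0.2,          # PPO clipping constant
    )
    # The PPO model itself is built later, once update_search_space()
    # receives the NAS search space.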
src/sdk/pynni/nni/ppo_tuner/util.py

@@ -94,12 +94,14 @@ def lstm_model(nlstm=128, layer_norm=False):
     An example of usage of lstm-based policy can be found here: common/tests/test_doc_examples.py/test_lstm_example
-    Parameters:
+
+    Parameters
     ----------
-    nlstm: int LSTM hidden state size
-    layer_norm: bool if True, layer-normalized version of LSTM is used
-    Returns:
+    nlstm : int
+        LSTM hidden state size
+    layer_norm : bool
+        if True, layer-normalized version of LSTM is used
+
+    Returns
     -------
     function that builds LSTM with a given input tensor / placeholder
     """
@@ -171,11 +173,15 @@ def adjust_shape(placeholder, data):
     adjust shape of the data to the shape of the placeholder if possible.
     If shape is incompatible, AssertionError is thrown
-    Parameters:
-    placeholder: tensorflow input placeholder
-    data: input data to be (potentially) reshaped to be fed into placeholder
-    Returns:
+
+    Parameters
+    ----------
+    placeholder
+        tensorflow input placeholder
+    data
+        input data to be (potentially) reshaped to be fed into placeholder
+
+    Returns
+    -------
     reshaped data
     """
     if not isinstance(data, np.ndarray) and not isinstance(data, list):
@@ -230,13 +236,16 @@ def observation_placeholder(ob_space, batch_size=None, name='Ob'):
     Create placeholder to feed observations into of the size appropriate to the observation space
-    Parameters:
+
+    Parameters
     ----------
-    ob_space: gym.Space observation space
-    batch_size: int size of the batch to be fed into input. Can be left None in most cases.
-    name: str name of the placeholder
-    Returns:
+    ob_space : gym.Space
+        observation space
+    batch_size : int
+        size of the batch to be fed into input. Can be left None in most cases.
+    name : str
+        name of the placeholder
+
+    Returns
     -------
     tensorflow placeholder tensor
     """
src/sdk/pynni/nni/smac_tuner/convert_ss_to_scenario.py

@@ -24,11 +24,14 @@ import numpy as np
 def get_json_content(file_path):
-    """Load json file content
+    """
+    Load json file content
+
     Parameters
     ----------
     file_path:
         path to the file
+
     Raises
     ------
     TypeError
@@ -43,7 +46,8 @@ def get_json_content(file_path):
 def generate_pcs(nni_search_space_content):
-    """Generate the Parameter Configuration Space (PCS) which defines the
+    """
+    Generate the Parameter Configuration Space (PCS) which defines the
     legal ranges of the parameters to be optimized and their default values.
     Generally, the format is:
     # parameter_name categorical {value_1, ..., value_N} [default value]
@@ -53,14 +57,17 @@ def generate_pcs(nni_search_space_content):
     # parameter_name real [min_value, max_value] [default value]
     # parameter_name real [min_value, max_value] [default value] log
     Reference: https://automl.github.io/SMAC3/stable/options.html
+
     Parameters
     ----------
     nni_search_space_content: search_space
         The search space in this experiment in nni
+
     Returns
     -------
     Parameter Configuration Space (PCS)
         the legal ranges of the parameters to be optimized and their default values
+
     Raises
     ------
     RuntimeError
@@ -122,7 +129,8 @@ def generate_pcs(nni_search_space_content):
 def generate_scenario(ss_content):
-    """Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
+    """
+    Generate the scenario. The scenario-object (smac.scenario.scenario.Scenario) is used to configure SMAC and
     can be constructed either by providing an actual scenario-object, or by specifing the options in a scenario file.
     Reference: https://automl.github.io/SMAC3/stable/options.html
     The format of the scenario file is one option per line:
@@ -191,6 +199,7 @@ def generate_scenario(ss_content):
     wallclock_limit: int
         Maximum amount of wallclock-time used for optimization. Default: inf.
         Use default because this is controlled by nni
+
     Returns
     -------
     Scenario:
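To make the PCS grammar quoted in the docstring concrete, here is a small hand-written pairing of an NNI search-space entry with the PCS lines it roughly corresponds to; the exact output of generate_pcs (for example, whether choices are emitted as indices) is not shown in this commit, so treat the second half as an approximation:

    # NNI search space (standard search-space spec format)
    nni_search_space = {
        "batch_size": {"_type": "choice", "_value": [16, 32, 64]},
        "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]},
    }

    # Approximate PCS lines, following the grammar in the docstring above:
    pcs_lines = [
        "batch_size categorical {16, 32, 64} [16]",
        "learning_rate real [0.0001, 0.1] [0.0001]",
    ]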
src/sdk/pynni/nni/smac_tuner/smac_tuner.py

@@ -41,13 +41,17 @@ from .convert_ss_to_scenario import generate_scenario
 class SMACTuner(Tuner):
     """
-    Parameters
-    ----------
-    optimize_mode: str
-        optimize mode, 'maximize' or 'minimize', by default 'maximize'
+    This is a wrapper of [SMAC](https://github.com/automl/SMAC3) following NNI tuner interface.
+    It only supports ``SMAC`` mode, and does not support the multiple instances of SMAC3 (i.e.,
+    the same configuration is run multiple times).
     """
     def __init__(self, optimize_mode="maximize"):
-        """Constructor"""
+        """
+        Parameters
+        ----------
+        optimize_mode : str
+            Optimize mode, 'maximize' or 'minimize', by default 'maximize'
+        """
         self.logger = logging.getLogger(
             self.__module__ + "." + self.__class__.__name__)
         self.optimize_mode = OptimizeMode(optimize_mode)
@@ -61,11 +65,14 @@ class SMACTuner(Tuner):
         self.cs = None

     def _main_cli(self):
-        """Main function of SMAC for CLI interface
+        """
+        Main function of SMAC for CLI interface. Some initializations of the wrapped SMAC are done
+        in this function.
+
         Returns
         -------
-        instance
-            optimizer
+        obj
+            The object of the SMAC optimizer
         """
         self.logger.info("SMAC call: %s", " ".join(sys.argv))
@@ -126,20 +133,23 @@ class SMACTuner(Tuner):
     def update_search_space(self, search_space):
         """
-        NOTE: updating search space is not supported.
+        Convert search_space to the format that ``SMAC3`` could recognize, thus, not all the search space types
+        are supported. In this function, we also do the initialization of `SMAC3`, i.e., calling ``self._main_cli``.
+
+        NOTE: updating search space during experiment running is not supported.
+
         Parameters
         ----------
-        search_space: dict
-            search space
+        search_space : dict
+            The format could be referred to search space spec (https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
         """
-        # TODO: this is ugly, we put all the initialization work in this method, because initialization relies
-        # on search space, also because update_search_space is called at the beginning.
         if not self.update_ss_done:
             self.categorical_dict = generate_scenario(search_space)
             if self.categorical_dict is None:
                 raise RuntimeError('categorical dict is not correctly returned after parsing search space.')
+            # TODO: this is ugly, we put all the initialization work in this method, because initialization relies
+            # on search space, also because update_search_space is called at the beginning.
            self.optimizer = self._main_cli()
            self.smbo_solver = self.optimizer.solver
            self.loguniform_key = {key for key in search_space.keys() if search_space[key]['_type'] == 'loguniform'}
@@ -148,19 +158,23 @@ class SMACTuner(Tuner):
             self.logger.warning('update search space is not supported.')

     def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
-        """receive_trial_result
+        """
+        Receive a trial's final performance result reported through :func:``nni.report_final_result`` by the trial.
+        GridSearchTuner does not need trial's results.
+
         Parameters
         ----------
-        parameter_id: int
-            parameter id
-        parameters:
-            parameters
-        value:
-            value
+        parameter_id : int
+            Unique identifier of used hyper-parameters, same with :meth:`generate_parameters`.
+        parameters : dict
+            Hyper-parameters generated by :meth:`generate_parameters`.
+        value : dict
+            Result from trial (the return value of :func:`nni.report_final_result`).
+
         Raises
         ------
         RuntimeError
-            Received parameter id not in total_data
+            Received parameter id not in ``self.total_data``
         """
         reward = extract_scalar_reward(value)
         if self.optimize_mode is OptimizeMode.Maximize:
@@ -176,14 +190,16 @@ class SMACTuner(Tuner):
     def param_postprocess(self, challenger_dict):
         """
-        Postprocessing for a set of parameter includes:
-        1. Convert the values of type `loguniform` back to their initial range.
-        2. Convert categorical: categorical values in search space are changed to list of numbers before,
+        Postprocessing for a set of hyperparameters includes:
+        1. Convert the values of type ``loguniform`` back to their initial range.
+        2. Convert ``categorical``: categorical values in search space are changed to list of numbers before,
         those original values will be changed back in this function.
+
         Parameters
         ----------
-        challenger_dict: dict
+        challenger_dict : dict
             challenger dict
+
         Returns
         -------
         dict
@@ -203,15 +219,21 @@ class SMACTuner(Tuner):
         return converted_dict

     def generate_parameters(self, parameter_id, **kwargs):
-        """generate one instance of hyperparameters
+        """
+        Generate one instance of hyperparameters (i.e., one configuration).
+        Get one from SMAC3's ``challengers``.
+
         Parameters
         ----------
-        parameter_id: int
-            parameter id
+        parameter_id : int
+            Unique identifier for requested hyper-parameters. This will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Not used
+
         Returns
         -------
-        list
-            new generated parameters
+        dict
+            One newly generated configuration
         """
         if self.first_one:
             init_challenger = self.smbo_solver.nni_smac_start()
@@ -224,15 +246,23 @@ class SMACTuner(Tuner):
         return self.param_postprocess(challenger.get_dictionary())

     def generate_multiple_parameters(self, parameter_id_list, **kwargs):
-        """generate mutiple instances of hyperparameters
+        """
+        Generate mutiple instances of hyperparameters. If it is a first request,
+        retrieve the instances from initial challengers. While if it is not, request
+        new challengers and retrieve instances from the requested challengers.
+
         Parameters
         ----------
-        parameter_id_list: list
-            list of parameter id
+        parameter_id_list: list of int
+            Unique identifiers for each set of requested hyper-parameters.
+            These will later be used in :meth:`receive_trial_result`.
+        **kwargs
+            Not used
+
         Returns
         -------
         list
-            list of new generated parameters
+            a list of newly generated configurations
         """
         if self.first_one:
             params = []
@@ -254,11 +284,12 @@ class SMACTuner(Tuner):
     def import_data(self, data):
         """
-        Import additional data for tuning
+        Import additional data for tuning.
+
         Parameters
         ----------
-        data: list of dict
-            Each of which has at least two keys, `parameter` and `value`.
+        data : list of dict
+            Each of which has at least two keys, ``parameter`` and ``value``.
         """
         _completed_num = 0
         for trial_info in data:
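The param_postprocess docstring above names two reverse conversions, but the method body is elided in this view. The sketch below is my assumption of how such a "convert back" step could look (np.exp for loguniform values, an index lookup for categoricals); it is illustrative only and not the code from the commit:

    import numpy as np

    loguniform_key = {'learning_rate'}                            # keys stored in log space
    categorical_dict = {'optimizer': ['sgd', 'adam', 'rmsprop']}  # original choice values
    challenger_dict = {'learning_rate': np.log(0.01), 'optimizer': 1}

    converted = {}
    for key, value in challenger_dict.items():
        if key in loguniform_key:
            converted[key] = np.exp(value)                   # back to the original range
        elif key in categorical_dict:
            converted[key] = categorical_dict[key][value]    # index -> original value
        else:
            converted[key] = value
    # converted == {'learning_rate': 0.01..., 'optimizer': 'adam'}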
src/sdk/pynni/nni/trial.py

@@ -43,8 +43,18 @@ _sequence_id = platform.get_sequence_id()
 def get_next_parameter():
-    """Returns a set of (hyper-)paremeters generated by Tuner.
-    Returns None if no more (hyper-)parameters can be generated by Tuner."""
+    """
+    Get the hyper paremeters generated by tuner. For a multiphase experiment, it returns a new group of hyper
+    parameters at each call of get_next_parameter. For a non-multiphase (multiPhase is not configured or set to False)
+    experiment, it returns hyper parameters only on the first call for each trial job, it returns None since second call.
+    This API should be called only once in each trial job of an experiment which is not specified as multiphase.
+
+    Returns
+    -------
+    dict
+        A dict object contains the hyper parameters generated by tuner, the keys of the dict are defined in
+        search space. Returns None if no more hyper parameters can be generated by tuner.
+    """
     global _params
     _params = platform.get_next_parameter()
     if _params is None:
@@ -52,6 +62,15 @@ def get_next_parameter():
     return _params['parameters']

 def get_current_parameter(tag=None):
+    """
+    Get current hyper parameters generated by tuner. It returns the same group of hyper parameters as the last
+    call of get_next_parameter returns.
+
+    Parameters
+    ----------
+    tag: str
+        hyper parameter key
+    """
     global _params
     if _params is None:
         return None
@@ -60,19 +79,51 @@ def get_current_parameter(tag=None):
     return _params['parameters'][tag]

 def get_experiment_id():
+    """
+    Get experiment ID.
+
+    Returns
+    -------
+    str
+        Identifier of current experiment
+    """
     return _experiment_id

 def get_trial_id():
+    """
+    Get trial job ID which is string identifier of a trial job, for example 'MoXrp'. In one experiment, each trial
+    job has an unique string ID.
+
+    Returns
+    -------
+    str
+        Identifier of current trial job which is calling this API.
+    """
     return _trial_id

 def get_sequence_id():
+    """
+    Get trial job sequence nubmer. A sequence number is an integer value assigned to each trial job base on the
+    order they are submitted, incremental starting from 0. In one experiment, both trial job ID and sequence number
+    are unique for each trial job, they are of different data types.
+
+    Returns
+    -------
+    int
+        Sequence number of current trial job which is calling this API.
+    """
     return _sequence_id

 _intermediate_seq = 0

 def report_intermediate_result(metric):
-    """Reports intermediate result to Assessor.
-    metric: serializable object.
+    """
+    Reports intermediate result to NNI.
+
+    Parameters
+    ----------
+    metric:
+        serializable object.
     """
     global _intermediate_seq
     assert _params is not None, 'nni.get_next_parameter() needs to be called before report_intermediate_result'
@@ -88,8 +139,13 @@ def report_intermediate_result(metric):
 def report_final_result(metric):
-    """Reports final result to tuner.
-    metric: serializable object.
+    """
+    Reports final result to NNI.
+
+    Parameters
+    ----------
+    metric:
+        serializable object.
     """
     assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result'
     metric = json_tricks.dumps({
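The functions documented above form the trial-side API. A minimal trial script exercising them; the training loop and metric values are stand-ins, and 'learning_rate' is just an example key that depends on the experiment's search space:

    import nni

    params = nni.get_next_parameter()            # dict of hyper-parameters from the tuner
    lr = params.get('learning_rate', 0.01)       # example key, depends on the search space

    accuracy = 0.0
    for epoch in range(3):
        accuracy = 0.5 + 0.1 * epoch             # stand-in for real training/evaluation
        nni.report_intermediate_result(accuracy) # consumed by the assessor

    nni.report_final_result(accuracy)            # consumed by the tuner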
src/sdk/pynni/nni/tuner.py

@@ -76,10 +76,12 @@ class Tuner(Recoverable):
     Builtin tuners:
     :class:`~nni.hyperopt_tuner.hyperopt_tuner.HyperoptTuner`
     :class:`~nni.evolution_tuner.evolution_tuner.EvolutionTuner`
-    :class:`~nni.smac_tuner.smac_tuner.SMACTuner`
-    :class:`~nni.gridsearch_tuner.gridsearch_tuner.GridSearchTuner`
+    :class:`~nni.smac_tuner.SMACTuner`
+    :class:`~nni.gridsearch_tuner.GridSearchTuner`
     :class:`~nni.networkmorphism_tuner.networkmorphism_tuner.NetworkMorphismTuner`
     :class:`~nni.metis_tuner.mets_tuner.MetisTuner`
+    :class:`~nni.ppo_tuner.PPOTuner`
+    :class:`~nni.gp_tuner.gp_tuner.GPTuner`
     """

     def generate_parameters(self, parameter_id, **kwargs):
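The Tuner base class whose docstring is adjusted above is the interface every tuner in that list implements. A toy custom tuner, sketching the three core hooks (the random-choice body is illustrative and not part of the commit):

    import random
    from nni.tuner import Tuner

    class RandomChoiceTuner(Tuner):
        """Toy tuner: picks a random value for each 'choice' parameter."""

        def __init__(self):
            self.search_space = {}

        def update_search_space(self, search_space):
            # called at experiment start with the search space spec
            self.search_space = search_space

        def generate_parameters(self, parameter_id, **kwargs):
            # return one configuration as a serializable dict
            return {name: random.choice(spec['_value'])
                    for name, spec in self.search_space.items()
                    if spec['_type'] == 'choice'}

        def receive_trial_result(self, parameter_id, parameters, value, **kwargs):
            # a random tuner ignores results; real tuners update their model here
            pass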
src/sdk/pynni/tests/test_compressor.py
View file @
594924a9
...
@@ -2,81 +2,26 @@ from unittest import TestCase, main
...
@@ -2,81 +2,26 @@ from unittest import TestCase, main
import
tensorflow
as
tf
import
tensorflow
as
tf
import
torch
import
torch
import
torch.nn.functional
as
F
import
torch.nn.functional
as
F
import
nni.compression.tensorflow
as
tf_compressor
import
nni.compression.torch
as
torch_compressor
import
nni.compression.torch
as
torch_compressor
if
tf
.
__version__
>=
'2.0'
:
def
weight_variable
(
shape
):
import
nni.compression.tensorflow
as
tf_compressor
return
tf
.
Variable
(
tf
.
truncated_normal
(
shape
,
stddev
=
0.1
))
def
get_tf_mnist_model
():
model
=
tf
.
keras
.
models
.
Sequential
([
def
bias_variable
(
shape
):
tf
.
keras
.
layers
.
Conv2D
(
filters
=
32
,
kernel_size
=
7
,
input_shape
=
[
28
,
28
,
1
],
activation
=
'relu'
,
padding
=
"SAME"
),
return
tf
.
Variable
(
tf
.
constant
(
0.1
,
shape
=
shape
))
tf
.
keras
.
layers
.
MaxPooling2D
(
pool_size
=
2
),
tf
.
keras
.
layers
.
Conv2D
(
filters
=
64
,
kernel_size
=
3
,
activation
=
'relu'
,
padding
=
"SAME"
),
tf
.
keras
.
layers
.
MaxPooling2D
(
pool_size
=
2
),
def
conv2d
(
x_input
,
w_matrix
):
tf
.
keras
.
layers
.
Flatten
(),
return
tf
.
nn
.
conv2d
(
x_input
,
w_matrix
,
strides
=
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
tf
.
keras
.
layers
.
Dense
(
units
=
128
,
activation
=
'relu'
),
tf
.
keras
.
layers
.
Dropout
(
0.5
),
tf
.
keras
.
layers
.
Dense
(
units
=
10
,
activation
=
'softmax'
),
def
max_pool
(
x_input
,
pool_size
):
])
size
=
[
1
,
pool_size
,
pool_size
,
1
]
model
.
compile
(
loss
=
"sparse_categorical_crossentropy"
,
return
tf
.
nn
.
max_pool
(
x_input
,
ksize
=
size
,
strides
=
size
,
padding
=
'SAME'
)
optimizer
=
tf
.
keras
.
optimizers
.
SGD
(
lr
=
1e-3
),
metrics
=
[
"accuracy"
])
return
model
class
TfMnist
:
def
__init__
(
self
):
images
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
784
],
name
=
'input_x'
)
labels
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
10
],
name
=
'input_y'
)
keep_prob
=
tf
.
placeholder
(
tf
.
float32
,
name
=
'keep_prob'
)
self
.
images
=
images
self
.
labels
=
labels
self
.
keep_prob
=
keep_prob
self
.
train_step
=
None
self
.
accuracy
=
None
self
.
w1
=
None
self
.
b1
=
None
self
.
fcw1
=
None
self
.
cross
=
None
with
tf
.
name_scope
(
'reshape'
):
x_image
=
tf
.
reshape
(
images
,
[
-
1
,
28
,
28
,
1
])
with
tf
.
name_scope
(
'conv1'
):
w_conv1
=
weight_variable
([
5
,
5
,
1
,
32
])
self
.
w1
=
w_conv1
b_conv1
=
bias_variable
([
32
])
self
.
b1
=
b_conv1
h_conv1
=
tf
.
nn
.
relu
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
)
with
tf
.
name_scope
(
'pool1'
):
h_pool1
=
max_pool
(
h_conv1
,
2
)
with
tf
.
name_scope
(
'conv2'
):
w_conv2
=
weight_variable
([
5
,
5
,
32
,
64
])
b_conv2
=
bias_variable
([
64
])
h_conv2
=
tf
.
nn
.
relu
(
conv2d
(
h_pool1
,
w_conv2
)
+
b_conv2
)
with
tf
.
name_scope
(
'pool2'
):
h_pool2
=
max_pool
(
h_conv2
,
2
)
with
tf
.
name_scope
(
'fc1'
):
w_fc1
=
weight_variable
([
7
*
7
*
64
,
1024
])
self
.
fcw1
=
w_fc1
b_fc1
=
bias_variable
([
1024
])
h_pool2_flat
=
tf
.
reshape
(
h_pool2
,
[
-
1
,
7
*
7
*
64
])
h_fc1
=
tf
.
nn
.
relu
(
tf
.
matmul
(
h_pool2_flat
,
w_fc1
)
+
b_fc1
)
with
tf
.
name_scope
(
'dropout'
):
h_fc1_drop
=
tf
.
nn
.
dropout
(
h_fc1
,
0.5
)
with
tf
.
name_scope
(
'fc2'
):
w_fc2
=
weight_variable
([
1024
,
10
])
b_fc2
=
bias_variable
([
10
])
y_conv
=
tf
.
matmul
(
h_fc1_drop
,
w_fc2
)
+
b_fc2
with
tf
.
name_scope
(
'loss'
):
cross_entropy
=
tf
.
reduce_mean
(
tf
.
nn
.
softmax_cross_entropy_with_logits
(
labels
=
labels
,
logits
=
y_conv
))
self
.
cross
=
cross_entropy
with
tf
.
name_scope
(
'adam_optimizer'
):
self
.
train_step
=
tf
.
train
.
AdamOptimizer
(
0.0001
).
minimize
(
cross_entropy
)
with
tf
.
name_scope
(
'accuracy'
):
correct_prediction
=
tf
.
equal
(
tf
.
argmax
(
y_conv
,
1
),
tf
.
argmax
(
labels
,
1
))
self
.
accuracy
=
tf
.
reduce_mean
(
tf
.
cast
(
correct_prediction
,
tf
.
float32
))
class
TorchMnist
(
torch
.
nn
.
Module
):
class
TorchMnist
(
torch
.
nn
.
Module
):
def
__init__
(
self
):
def
__init__
(
self
):
...
@@ -96,25 +41,47 @@ class TorchMnist(torch.nn.Module):
...
@@ -96,25 +41,47 @@ class TorchMnist(torch.nn.Module):
x
=
self
.
fc2
(
x
)
x
=
self
.
fc2
(
x
)
return
F
.
log_softmax
(
x
,
dim
=
1
)
return
F
.
log_softmax
(
x
,
dim
=
1
)
def
tf2
(
func
):
def
test_tf2_func
(
self
):
if
tf
.
__version__
>=
'2.0'
:
func
()
return
test_tf2_func
class
CompressorTestCase
(
TestCase
):
class
CompressorTestCase
(
TestCase
):
def
test_tf_pruner
(
self
):
model
=
TfMnist
()
configure_list
=
[{
'sparsity'
:
0.8
,
'op_types'
:
[
'default'
]}]
tf_compressor
.
LevelPruner
(
tf
.
get_default_graph
(),
configure_list
).
compress
()
def
test_tf_quantizer
(
self
):
model
=
TfMnist
()
tf_compressor
.
NaiveQuantizer
(
tf
.
get_default_graph
(),
[{
'op_types'
:
[
'default'
]}]).
compress
()
def
test_torch_pruner
(
self
):
def
test_torch_pruner
(
self
):
model
=
TorchMnist
()
model
=
TorchMnist
()
configure_list
=
[{
'sparsity'
:
0.8
,
'op_types'
:
[
'default'
]}]
configure_list
=
[{
'sparsity'
:
0.8
,
'op_types'
:
[
'default'
]}]
torch_compressor
.
LevelPruner
(
model
,
configure_list
).
compress
()
torch_compressor
.
LevelPruner
(
model
,
configure_list
).
compress
()
    def test_torch_fpgm_pruner(self):
        model = TorchMnist()
        configure_list = [{'sparsity': 0.5, 'op_types': ['Conv2d']}]
        torch_compressor.FPGMPruner(model, configure_list).compress()
    def test_torch_quantizer(self):
        model = TorchMnist()
        torch_compressor.NaiveQuantizer(model, [{'op_types': ['default']}]).compress()
        configure_list = [{
            'quant_types': ['weight'],
            'quant_bits': {
                'weight': 8,
            },
            'op_types': ['Conv2d', 'Linear']
        }]
        torch_compressor.NaiveQuantizer(model, configure_list).compress()
    @tf2
    def test_tf_pruner(self):
        configure_list = [{'sparsity': 0.8, 'op_types': ['default']}]
        tf_compressor.LevelPruner(get_tf_mnist_model(), configure_list).compress()

    @tf2
    def test_tf_quantizer(self):
        tf_compressor.NaiveQuantizer(get_tf_mnist_model(), [{'op_types': ['default']}]).compress()

    @tf2
    def test_tf_fpgm_pruner(self):
        configure_list = [{'sparsity': 0.5, 'op_types': ['Conv2D']}]
        tf_compressor.FPGMPruner(get_tf_mnist_model(), configure_list).compress()
if __name__ == '__main__':
...
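Outside the test harness, the pruner exercised by test_torch_pruner above can be driven the same way. The following is a minimal usage sketch, not part of this commit; it assumes the TorchMnist model defined earlier in the file, the test module's torch_compressor alias (e.g. import nni.compression.torch as torch_compressor), and the usual (N, 1, 28, 28) MNIST input shape:

import torch
import nni.compression.torch as torch_compressor  # assumed module path for this NNI version

model = TorchMnist()
configure_list = [{'sparsity': 0.8, 'op_types': ['default']}]
# compress() wraps the matched layers with pruning masks and returns the model
model = torch_compressor.LevelPruner(model, configure_list).compress()
# the pruned model still supports a normal forward pass
output = model(torch.randn(2, 1, 28, 28))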
test/pipelines-it-installation.yml  0 → 100644  View file @ 594924a9
jobs:
- job: 'pip_install_ubuntu_python36'
  pool:
    vmImage: 'ubuntu-18.04'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python3 -V
      python3 -m pip install --upgrade pip setuptools --user
      python3 -m pip install --upgrade nni --user
    displayName: 'Install nni'

- job: 'pip_install_macOS_python36'
  pool:
    vmImage: 'macOS-10.13'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python3 -V
      python3 -m pip install --upgrade pip setuptools --user
      python3 -m pip install --upgrade nni --user
    displayName: 'Install nni'

- job: 'pip_install_windows_python36'
  pool:
    vmImage: 'windows-latest'
  strategy:
    matrix:
      Python36:
        PYTHON_VERSION: '3.6'
  steps:
  - script: |
      python -V
      python -m pip install --upgrade pip setuptools --user
      python -m pip install --upgrade nni --user
    displayName: 'Install nni'
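These pipeline jobs only verify that the released package installs from PyPI on each OS. A quick local sanity check after the same pip install step could look like the sketch below (illustrative only, not part of the pipeline):

# Verify that the freshly installed nni distribution resolves and report its version.
import pkg_resources

print(pkg_resources.get_distribution('nni').version)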
tools/nni_cmd/config_utils.py  View file @ 594924a9
...
@@ -72,7 +72,7 @@ class Experiments:
        self.experiment_file = os.path.join(NNICTL_HOME_DIR, '.experiment')
        self.experiments = self.read_file()

-    def add_experiment(self, expId, port, time, file_name, platform):
+    def add_experiment(self, expId, port, time, file_name, platform, experiment_name):
        '''set {key:value} pairs to self.experiment'''
        self.experiments[expId] = {}
        self.experiments[expId]['port'] = port
...
@@ -81,6 +81,7 @@ class Experiments:
        self.experiments[expId]['status'] = 'INITIALIZED'
        self.experiments[expId]['fileName'] = file_name
        self.experiments[expId]['platform'] = platform
+        self.experiments[expId]['experimentName'] = experiment_name
        self.write_file()

    def update_experiment(self, expId, key, value):
...
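With the extra experiment_name parameter, a call site now records the human-readable name alongside the other metadata. A hypothetical invocation is sketched below; the import path assumes the tool is installed so that config_utils is importable as nni_cmd.config_utils, and every argument value is made up for illustration:

from nni_cmd.config_utils import Experiments  # assumed import path

experiments = Experiments()
# id, port, start time, config file, platform, and the new experiment name
experiments.add_experiment('GcGdw9vD', 8080, '2019-11-18 10:00:00', 'config.yml',
                           'local', 'mnist-example')
# The stored entry then carries 'port', 'status', 'fileName', 'platform' and the new
# 'experimentName' key, which nnictl reads back when listing experiments.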
tools/nni_cmd/constants.py  View file @ 594924a9
...
@@ -66,7 +66,7 @@ EXPERIMENT_INFORMATION_FORMAT = '-----------------------------------------------
                                '%s\n' \
                                '----------------------------------------------------------------------------------------\n'
-EXPERIMENT_DETAIL_FORMAT = 'Id: %s Status: %s Port: %s Platform: %s StartTime: %s EndTime: %s\n'
+EXPERIMENT_DETAIL_FORMAT = 'Id: %s Name: %s Status: %s Port: %s Platform: %s StartTime: %s EndTime: %s\n'
EXPERIMENT_MONITOR_INFO = 'Id: %s Status: %s Port: %s Platform: %s\n' \
                          'StartTime: %s Duration: %s'
...
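The widened format string now expects the experiment name in its second slot. A quick illustration with made-up values (the id, name, and times below are not from this commit):

EXPERIMENT_DETAIL_FORMAT = 'Id: %s Name: %s Status: %s Port: %s Platform: %s StartTime: %s EndTime: %s\n'
# Fill the seven slots in order: id, name, status, port, platform, start time, end time.
print(EXPERIMENT_DETAIL_FORMAT % ('GcGdw9vD', 'mnist-example', 'RUNNING', 8080,
                                  'local', '2019-11-18 10:00:00', 'N/A'))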
tools/nni_cmd/launcher.py  View file @ 594924a9
...
@@ -478,10 +478,11 @@ def launch_experiment(args, experiment_config, mode, config_file_name, experimen
        web_ui_url_list = get_local_urls(args.port)
        nni_config.set_config('webuiUrl', web_ui_url_list)
-    #save experiment information
+    # save experiment information
    nnictl_experiment_config = Experiments()
-    nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name, \
-        experiment_config['trainingServicePlatform'])
+    nnictl_experiment_config.add_experiment(experiment_id, args.port, start_time, config_file_name,
+        experiment_config['trainingServicePlatform'], experiment_config['experimentName'])
    print_normal(EXPERIMENT_SUCCESS_INFO % (experiment_id, ' '.join(web_ui_url_list)))
...
tools/nni_cmd/nnictl_utils.py  View file @ 594924a9
...
@@ -99,9 +99,13 @@ def check_experiment_id(args, update=True):
            print_error('There are multiple experiments, please set the experiment id...')
            experiment_information = ""
            for key in running_experiment_list:
-                experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \
-                experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], \
-                experiment_dict[key]['endTime']))
+                experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
+                    experiment_dict[key].get('experimentName', 'N/A'),
+                    experiment_dict[key]['status'],
+                    experiment_dict[key]['port'],
+                    experiment_dict[key].get('platform'),
+                    experiment_dict[key]['startTime'],
+                    experiment_dict[key]['endTime'])
            print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
            exit(1)
        elif not running_experiment_list:
...
@@ -155,9 +159,13 @@ def parse_ids(args):
            print_error('There are multiple experiments, please set the experiment id...')
            experiment_information = ""
            for key in running_experiment_list:
-                experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \
-                experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], \
-                experiment_dict[key]['endTime']))
+                experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
+                    experiment_dict[key].get('experimentName', 'N/A'),
+                    experiment_dict[key]['status'],
+                    experiment_dict[key]['port'],
+                    experiment_dict[key].get('platform'),
+                    experiment_dict[key]['startTime'],
+                    experiment_dict[key]['endTime'])
            print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)
            exit(1)
        else:
...
@@ -573,8 +581,13 @@ def experiment_list(args):
        print_warning('There is no experiment running...\nYou can use \'nnictl experiment list --all\' to list all experiments.')
    experiment_information = ""
    for key in experiment_id_list:
-        experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'], \
-        experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
+        experiment_information += EXPERIMENT_DETAIL_FORMAT % (key,
+            experiment_dict[key].get('experimentName', 'N/A'),
+            experiment_dict[key]['status'],
+            experiment_dict[key]['port'],
+            experiment_dict[key].get('platform'),
+            experiment_dict[key]['startTime'],
+            experiment_dict[key]['endTime'])
    print(EXPERIMENT_INFORMATION_FORMAT % experiment_information)

def get_time_interval(time1, time2):
...