OpenDAS / nni / Commits / 85cb472e

Commit 85cb472e, authored Jan 08, 2019 by Shinai Yang (FA TALENT)

    Merge branch 'master' of https://github.com/SparkSnail/nni

Parents: 85c015dc, 2c862dcb
Changes: 41

Showing 20 changed files with 1467 additions and 187 deletions (+1467 / -187)
src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py      +37    -0
src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py       +114   -0
src/sdk/pynni/nni/metis_tuner/Regression_GP/__init__.py        +0     -0
src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py      +202   -0
src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py      +116   -0
src/sdk/pynni/nni/metis_tuner/lib_data.py                      +67    -0
src/sdk/pynni/nni/metis_tuner/metis_tuner.py                   +440   -0
src/sdk/pynni/nni/metis_tuner/requirments.txt                  +1     -0
src/sdk/pynni/requirements.txt                                 +4     -1
src/webui/src/components/Overview.tsx                          +4     -0
src/webui/src/components/TrialsDetail.tsx                      +39    -19
src/webui/src/components/overview/Progress.tsx                 +8     -8
src/webui/src/components/trial-detail/TableList.tsx            +318   -143
src/webui/src/static/const.ts                                  +40    -5
src/webui/src/static/function.ts                               +18    -2
src/webui/src/static/interface.ts                              +21    -2
src/webui/src/static/style/search.scss                         +14    -0
tools/nni_cmd/config_schema.py                                 +10    -0
tools/nni_cmd/launcher_utils.py                                +2     -2
tools/nni_trial_tool/log_utils.py                              +12    -5
src/sdk/pynni/nni/metis_tuner/Regression_GP/Prediction.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys

import numpy

sys.path.insert(1, os.path.join(sys.path[0], '..'))


def predict(parameters_value, regressor_gp):
    '''
    Predict by Gaussian Process Model
    '''
    parameters_value = numpy.array(parameters_value).reshape(-1, len(parameters_value))
    mu, sigma = regressor_gp.predict(parameters_value, return_std=True)

    return mu[0], sigma[0]
\ No newline at end of file
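Usage sketch (not part of this commit): predict() expects a regressor exposing scikit-learn's predict(X, return_std=True) interface. In the tuner the model comes from Regression_GP.CreateModel, which is not shown in this diff, so a plain GaussianProcessRegressor stands in for it below; the sample data is illustrative only.

# Hedged illustration only: a scikit-learn GaussianProcessRegressor stands in
# for the model normally built by Regression_GP.CreateModel (not in this diff).
from sklearn.gaussian_process import GaussianProcessRegressor

import nni.metis_tuner.Regression_GP.Prediction as gp_prediction

samples_x = [[0.1, 2], [0.4, 5], [0.9, 3]]   # observed configurations
samples_y = [0.32, 0.18, 0.25]               # their aggregated losses

regressor_gp = GaussianProcessRegressor().fit(samples_x, samples_y)

# predict() reshapes the single configuration to shape (1, n_features) and
# returns the posterior mean and standard deviation at that point.
mu, sigma = gp_prediction.predict([0.5, 4], regressor_gp)
print(mu, sigma)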
src/sdk/pynni/nni/metis_tuner/Regression_GP/Selection.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import random
import sys

import nni.metis_tuner.lib_acquisition_function as lib_acquisition_function
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.lib_data as lib_data
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction

sys.path.insert(1, os.path.join(sys.path[0], '..'))

CONSTRAINT_LOWERBOUND = None
CONSTRAINT_UPPERBOUND = None
CONSTRAINT_PARAMS_IDX = []


def selection_r(acquisition_function,
                samples_y_aggregation,
                x_bounds,
                x_types,
                regressor_gp,
                num_starting_points=100,
                minimize_constraints_fun=None):
    '''
    Select a candidate using randomly generated starting points
    '''
    minimize_starting_points = [lib_data.rand(x_bounds, x_types) \
                                for i in range(0, num_starting_points)]
    outputs = selection(acquisition_function, samples_y_aggregation,
                        x_bounds, x_types, regressor_gp,
                        minimize_starting_points,
                        minimize_constraints_fun=minimize_constraints_fun)
    return outputs


def selection(acquisition_function,
              samples_y_aggregation,
              x_bounds, x_types,
              regressor_gp,
              minimize_starting_points,
              minimize_constraints_fun=None):
    '''
    Select a candidate by running the named acquisition function over the GP model
    '''
    outputs = None

    sys.stderr.write("[%s] Exercise \"%s\" acquisition function\n" \
                     % (os.path.basename(__file__), acquisition_function))

    if acquisition_function == "ei":
        outputs = lib_acquisition_function.next_hyperparameter_expected_improvement(
            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
            samples_y_aggregation, minimize_starting_points,
            minimize_constraints_fun=minimize_constraints_fun)
    elif acquisition_function == "lc":
        outputs = lib_acquisition_function.next_hyperparameter_lowest_confidence(
            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
            minimize_starting_points,
            minimize_constraints_fun=minimize_constraints_fun)
    elif acquisition_function == "lm":
        outputs = lib_acquisition_function.next_hyperparameter_lowest_mu(
            gp_prediction.predict, [regressor_gp], x_bounds, x_types,
            minimize_starting_points,
            minimize_constraints_fun=minimize_constraints_fun)
    return outputs


def _rand_with_constraints(x_bounds, x_types):
    '''
    Randomly generate a value that satisfies the summation constraints
    '''
    outputs = None
    x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX]
    x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX]
    x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints,
                                                          x_types_withconstraints,
                                                          CONSTRAINT_LOWERBOUND,
                                                          CONSTRAINT_UPPERBOUND)
    if x_val_withconstraints is not None:
        outputs = [None] * len(x_bounds)
        for i, _ in enumerate(CONSTRAINT_PARAMS_IDX):
            outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i]
        for i, _ in enumerate(outputs):
            if outputs[i] is None:
                outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1])
    return outputs


def _minimize_constraints_fun_summation(x):
    '''
    Check whether the summation of the constrained parameters stays within bounds
    '''
    summation = sum([x[i] for i in CONSTRAINT_PARAMS_IDX])
    return CONSTRAINT_UPPERBOUND >= summation >= CONSTRAINT_LOWERBOUND
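A hedged sketch of driving selection_r directly (not part of the commit): it assumes a scikit-learn GaussianProcessRegressor as a stand-in for the Regression_GP.CreateModel output, and uses the bounds/types encoding from lib_data, where 'range_continuous' is [low, high] and 'discrete_int' lists the allowed values.

# Illustration only; data and model are placeholders.
from sklearn.gaussian_process import GaussianProcessRegressor

import nni.metis_tuner.Regression_GP.Selection as gp_selection

samples_x = [[0.01, 16], [0.05, 64], [0.09, 32]]
samples_y_aggregation = [0.40, 0.22, 0.31]           # aggregated losses
regressor_gp = GaussianProcessRegressor().fit(samples_x, samples_y_aggregation)

x_bounds = [[0.001, 0.1], [16, 32, 64, 128]]
x_types = ['range_continuous', 'discrete_int']

# Dispatch the expected-improvement acquisition function over 100 random starts.
proposal = gp_selection.selection_r("ei", samples_y_aggregation, x_bounds, x_types,
                                    regressor_gp, num_starting_points=100)
# proposal: {'hyperparameter': [...], 'expected_mu': ..., 'expected_sigma': ...,
#            'acquisition_func': 'ei'}, or None if no point satisfied the constraints.
print(proposal)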
src/sdk/pynni/nni/metis_tuner/Regression_GP/__init__.py  (new file, mode 100644, empty)
src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys

import numpy
from scipy.stats import norm
from scipy.optimize import minimize

import nni.metis_tuner.lib_data as lib_data


def next_hyperparameter_expected_improvement(fun_prediction,
                                             fun_prediction_args,
                                             x_bounds, x_types,
                                             samples_y_aggregation,
                                             minimize_starting_points,
                                             minimize_constraints_fun=None):
    '''
    "Expected Improvement" acquisition function
    '''
    best_x = None
    best_acquisition_value = None
    x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
    x_bounds_minmax = numpy.array(x_bounds_minmax)

    for starting_point in numpy.array(minimize_starting_points):
        res = minimize(fun=_expected_improvement,
                       x0=starting_point.reshape(1, -1),
                       bounds=x_bounds_minmax,
                       method="L-BFGS-B",
                       args=(fun_prediction,
                             fun_prediction_args,
                             x_bounds,
                             x_types,
                             samples_y_aggregation,
                             minimize_constraints_fun))

        if (best_acquisition_value is None) or \
           (res.fun < best_acquisition_value):
            res.x = numpy.ndarray.tolist(res.x)
            res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
            if (minimize_constraints_fun is None) or \
               (minimize_constraints_fun(res.x) is True):
                best_acquisition_value = res.fun
                best_x = res.x

    outputs = None
    if best_x is not None:
        mu, sigma = fun_prediction(best_x, *fun_prediction_args)
        outputs = {'hyperparameter': best_x, 'expected_mu': mu,
                   'expected_sigma': sigma, 'acquisition_func': "ei"}
    return outputs


def _expected_improvement(x, fun_prediction, fun_prediction_args,
                          x_bounds, x_types, samples_y_aggregation,
                          minimize_constraints_fun):
    # This is only for step-wise optimization
    x = lib_data.match_val_type(x, x_bounds, x_types)

    expected_improvement = sys.maxsize
    if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):
        mu, sigma = fun_prediction(x, *fun_prediction_args)

        loss_optimum = min(samples_y_aggregation)
        scaling_factor = -1

        # In case sigma equals zero
        with numpy.errstate(divide="ignore"):
            Z = scaling_factor * (mu - loss_optimum) / sigma
            expected_improvement = scaling_factor * (mu - loss_optimum) * \
                                   norm.cdf(Z) + sigma * norm.pdf(Z)
            expected_improvement = 0.0 if sigma == 0.0 else expected_improvement

        # We want expected_improvement to be as large as possible
        # (i.e., as small as possible for minimize(...))
        expected_improvement = -1 * expected_improvement
    return expected_improvement


def next_hyperparameter_lowest_confidence(fun_prediction,
                                          fun_prediction_args,
                                          x_bounds, x_types,
                                          minimize_starting_points,
                                          minimize_constraints_fun=None):
    '''
    "Lowest Confidence" acquisition function
    '''
    best_x = None
    best_acquisition_value = None
    x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
    x_bounds_minmax = numpy.array(x_bounds_minmax)

    for starting_point in numpy.array(minimize_starting_points):
        res = minimize(fun=_lowest_confidence,
                       x0=starting_point.reshape(1, -1),
                       bounds=x_bounds_minmax,
                       method="L-BFGS-B",
                       args=(fun_prediction,
                             fun_prediction_args,
                             x_bounds,
                             x_types,
                             minimize_constraints_fun))

        if (best_acquisition_value) is None or (res.fun < best_acquisition_value):
            res.x = numpy.ndarray.tolist(res.x)
            res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
            if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):
                best_acquisition_value = res.fun
                best_x = res.x

    outputs = None
    if best_x is not None:
        mu, sigma = fun_prediction(best_x, *fun_prediction_args)
        outputs = {'hyperparameter': best_x, 'expected_mu': mu,
                   'expected_sigma': sigma, 'acquisition_func': "lc"}
    return outputs


def _lowest_confidence(x, fun_prediction, fun_prediction_args,
                       x_bounds, x_types, minimize_constraints_fun):
    # This is only for step-wise optimization
    x = lib_data.match_val_type(x, x_bounds, x_types)

    ci = sys.maxsize
    if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):
        mu, sigma = fun_prediction(x, *fun_prediction_args)
        ci = (sigma * 1.96 * 2) / mu
        # We want ci to be as large as possible
        # (i.e., as small as possible for minimize(...)),
        # because this would mean lowest confidence
        ci = -1 * ci
    return ci


def next_hyperparameter_lowest_mu(fun_prediction,
                                  fun_prediction_args,
                                  x_bounds, x_types,
                                  minimize_starting_points,
                                  minimize_constraints_fun=None):
    '''
    "Lowest Mu" acquisition function
    '''
    best_x = None
    best_acquisition_value = None
    x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]
    x_bounds_minmax = numpy.array(x_bounds_minmax)

    for starting_point in numpy.array(minimize_starting_points):
        res = minimize(fun=_lowest_mu,
                       x0=starting_point.reshape(1, -1),
                       bounds=x_bounds_minmax,
                       method="L-BFGS-B",
                       args=(fun_prediction, fun_prediction_args, \
                             x_bounds, x_types, minimize_constraints_fun))

        if (best_acquisition_value is None) or (res.fun < best_acquisition_value):
            res.x = numpy.ndarray.tolist(res.x)
            res.x = lib_data.match_val_type(res.x, x_bounds, x_types)
            if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):
                best_acquisition_value = res.fun
                best_x = res.x

    outputs = None
    if best_x is not None:
        mu, sigma = fun_prediction(best_x, *fun_prediction_args)
        outputs = {'hyperparameter': best_x, 'expected_mu': mu,
                   'expected_sigma': sigma, 'acquisition_func': "lm"}
    return outputs


def _lowest_mu(x, fun_prediction, fun_prediction_args,
               x_bounds, x_types, minimize_constraints_fun):
    '''
    Calculate the lowest mu
    '''
    # This is only for step-wise optimization
    x = lib_data.match_val_type(x, x_bounds, x_types)

    mu = sys.maxsize
    if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):
        mu, _ = fun_prediction(x, *fun_prediction_args)
    return mu
\ No newline at end of file
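For clarity (not code from the commit): _expected_improvement computes the standard closed-form EI for a minimization problem, then negates it so scipy.optimize.minimize can search for the best point; the scaling_factor handling can obscure that. The same formula restated on its own, with illustrative numbers:

# Standalone restatement of the EI formula used in _expected_improvement,
# for a single (mu, sigma) prediction and the current best observed loss.
from scipy.stats import norm

def expected_improvement(mu, sigma, loss_optimum):
    # Minimization: improvement measures how far mu falls below the best loss.
    if sigma == 0.0:
        return 0.0
    z = (loss_optimum - mu) / sigma
    return (loss_optimum - mu) * norm.cdf(z) + sigma * norm.pdf(z)

# A point predicted slightly better than the optimum, with some uncertainty,
# gets a positive EI; the library negates this value before handing it to minimize().
print(expected_improvement(mu=0.15, sigma=0.05, loss_optimum=0.18))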
src/sdk/pynni/nni/metis_tuner/lib_constraint_summation.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import random

from operator import itemgetter


def check_feasibility(x_bounds, lowerbound, upperbound):
    '''
    This can have false positives.
    For example, parameters can only be 0 or 5, and the summation constraint is between 6 and 7.
    '''
    # x_bounds should be sorted, so even for the "discrete_int" type,
    # the smallest and the largest number should be the first and the last element
    x_bounds_lowerbound = sum([x_bound[0] for x_bound in x_bounds])
    x_bounds_upperbound = sum([x_bound[-1] for x_bound in x_bounds])

    # return ((x_bounds_lowerbound <= lowerbound) and (x_bounds_upperbound >= lowerbound)) or \
    #        ((x_bounds_lowerbound <= upperbound) and (x_bounds_upperbound >= upperbound))
    return (x_bounds_lowerbound <= lowerbound <= x_bounds_upperbound) or \
           (x_bounds_lowerbound <= upperbound <= x_bounds_upperbound)


def rand(x_bounds, x_types, lowerbound, upperbound, max_retries=100):
    '''
    Key idea is that we try to move towards the upperbound by randomly choosing one
    value for each parameter. However, for the last parameter,
    we need to make sure that its value can help us get above the lowerbound.
    '''
    outputs = None

    if check_feasibility(x_bounds, lowerbound, upperbound) is True:
        # Order parameters by their range size. We want the smallest range first,
        # because the corresponding parameter has fewer numbers to choose from
        x_idx_sorted = []
        for i, _ in enumerate(x_bounds):
            if x_types[i] == "discrete_int":
                x_idx_sorted.append([i, len(x_bounds[i])])
            elif (x_types[i] == "range_int") or (x_types[i] == "range_continuous"):
                x_idx_sorted.append([i, math.floor(x_bounds[i][1] - x_bounds[i][0])])
        x_idx_sorted = sorted(x_idx_sorted, key=itemgetter(1))

        for _ in range(max_retries):
            budget_allocated = 0
            outputs = [None] * len(x_bounds)

            for i, _ in enumerate(x_idx_sorted):
                x_idx = x_idx_sorted[i][0]
                # The amount of unallocated space that we have
                budget_max = upperbound - budget_allocated

                # NOT the last x that we need to assign a random number
                if i < (len(x_idx_sorted) - 1):
                    if x_bounds[x_idx][0] <= budget_max:
                        if x_types[x_idx] == "discrete_int":
                            # Note the valid integers
                            temp = []
                            for j in x_bounds[x_idx]:
                                if j <= budget_max:
                                    temp.append(j)
                            # Randomly pick a number from the integer array
                            if temp:
                                outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]

                        elif (x_types[x_idx] == "range_int") or \
                             (x_types[x_idx] == "range_continuous"):
                            outputs[x_idx] = random.randint(x_bounds[x_idx][0],
                                                            min(x_bounds[x_idx][-1], budget_max))

                else:
                    # The last x that we need to assign a random number
                    randint_lowerbound = lowerbound - budget_allocated
                    randint_lowerbound = 0 if randint_lowerbound < 0 else randint_lowerbound

                    # This check:
                    # is our smallest possible value going to overflow the available budget space,
                    # and is our largest possible value going to underflow the lower bound
                    if (x_bounds[x_idx][0] <= budget_max) and \
                       (x_bounds[x_idx][-1] >= randint_lowerbound):
                        if x_types[x_idx] == "discrete_int":
                            temp = []
                            for j in x_bounds[x_idx]:
                                # if (j <= budget_max) and (j >= randint_lowerbound):
                                if randint_lowerbound <= j <= budget_max:
                                    temp.append(j)
                            if temp:
                                outputs[x_idx] = temp[random.randint(0, len(temp) - 1)]
                        elif (x_types[x_idx] == "range_int") or \
                             (x_types[x_idx] == "range_continuous"):
                            outputs[x_idx] = random.randint(randint_lowerbound,
                                                            min(x_bounds[x_idx][1], budget_max))

                if outputs[x_idx] is None:
                    break
                else:
                    budget_allocated += outputs[x_idx]
            if None not in outputs:
                break
    return outputs
\ No newline at end of file
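To make the docstring's false-positive example concrete (a sketch, not part of the commit): with two 'discrete_int' parameters that can each only be 0 or 5, the per-parameter sums bound the total to [0, 10], so check_feasibility reports a [6, 7] constraint window as feasible even though no combination of 0s and 5s lands in it, and rand() exhausts its retries.

# Sketch illustrating the false positive described in check_feasibility's docstring.
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation

x_bounds = [[0, 5], [0, 5]]              # each parameter may only be 0 or 5
x_types = ['discrete_int', 'discrete_int']

# The per-parameter sums bound the total to [0, 10], so a [6, 7] window looks feasible...
print(lib_constraint_summation.check_feasibility(x_bounds, 6, 7))   # True

# ...but no combination of 0s and 5s sums into [6, 7], so rand() burns through its
# retries and hands back a list that still contains None entries.
print(lib_constraint_summation.rand(x_bounds, x_types, 6, 7, max_retries=10))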
src/sdk/pynni/nni/metis_tuner/lib_data.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import math
import random


def match_val_type(vals, vals_bounds, vals_types):
    '''
    Update the values in the array to match their corresponding types
    '''
    vals_new = []

    for i, _ in enumerate(vals_types):
        if vals_types[i] == "discrete_int":
            # Find the closest integer in the array, vals_bounds
            vals_new.append(min(vals_bounds[i], key=lambda x: abs(x - vals[i])))
        elif vals_types[i] == "range_int":
            # Round down to the nearest integer
            vals_new.append(math.floor(vals[i]))
        elif vals_types[i] == "range_continuous":
            # Don't do any processing for continuous numbers
            vals_new.append(vals[i])
        else:
            return None

    return vals_new


def rand(x_bounds, x_types):
    '''
    Randomly generate a value for each variable within its bounds
    '''
    outputs = []

    for i, _ in enumerate(x_bounds):
        if x_types[i] == "discrete_int":
            temp = x_bounds[i][random.randint(0, len(x_bounds[i]) - 1)]
            outputs.append(temp)
        elif x_types[i] == "range_int":
            temp = random.randint(x_bounds[i][0], x_bounds[i][1])
            outputs.append(temp)
        elif x_types[i] == "range_continuous":
            temp = random.uniform(x_bounds[i][0], x_bounds[i][1])
            outputs.append(temp)
        else:
            return None

    return outputs
\ No newline at end of file
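A small sketch of the two helpers over a mixed search space (illustrative values, not part of the commit):

# Sketch: drawing a random configuration and snapping a continuous optimizer
# result back onto the declared parameter types.
import nni.metis_tuner.lib_data as lib_data

x_bounds = [[16, 32, 64, 128],   # 'discrete_int': list of allowed values
            [1, 10],             # 'range_int': integer range
            [0.0001, 0.1]]       # 'range_continuous': real range
x_types = ['discrete_int', 'range_int', 'range_continuous']

print(lib_data.rand(x_bounds, x_types))            # e.g. [64, 7, 0.0231]

# L-BFGS-B works in a continuous space, so its output is mapped back:
# closest allowed value, floored integer, continuous value left untouched.
print(lib_data.match_val_type([70.3, 6.8, 0.05], x_bounds, x_types))   # [64, 6, 0.05]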
src/sdk/pynni/nni/metis_tuner/metis_tuner.py  (new file, mode 100644)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import copy
import logging
import os
import random
import statistics
import sys

from enum import Enum, unique
from multiprocessing.dummy import Pool as ThreadPool

from nni.tuner import Tuner

import nni.metis_tuner.lib_data as lib_data
import nni.metis_tuner.lib_constraint_summation as lib_constraint_summation
import nni.metis_tuner.Regression_GP.CreateModel as gp_create_model
import nni.metis_tuner.Regression_GP.Selection as gp_selection
import nni.metis_tuner.Regression_GP.Prediction as gp_prediction
import nni.metis_tuner.Regression_GP.OutlierDetection as gp_outlier_detection
import nni.metis_tuner.Regression_GMM.CreateModel as gmm_create_model
import nni.metis_tuner.Regression_GMM.Selection as gmm_selection

logger = logging.getLogger("Metis_Tuner_AutoML")


@unique
class OptimizeMode(Enum):
    '''
    Optimize Mode class
    '''
    Minimize = 'minimize'
    Maximize = 'maximize'


NONE_TYPE = ''
CONSTRAINT_LOWERBOUND = None
CONSTRAINT_UPPERBOUND = None
CONSTRAINT_PARAMS_IDX = []


class MetisTuner(Tuner):
    '''
    Metis Tuner
    '''

    def __init__(self, optimize_mode="maximize", no_resampling=True, no_candidates=True,
                 selection_num_starting_points=10, cold_start_num=10):
        '''
        optimize_mode: a string, either "maximize" or "minimize".
        no_resampling: True or False. Should Metis consider re-sampling as part of the search strategy?
            If you are confident that the training dataset is noise-free, then you do not need re-sampling.
        no_candidates: True or False. Should Metis suggest parameters for the next benchmark?
            If you do not plan to do more benchmarks, Metis can skip this step.
        selection_num_starting_points: how many times Metis should try to find the global optimum in the search space.
            The higher the number, the longer it takes to output the solution.
        cold_start_num: Metis needs some trial results to cold-start. When the number of trial results is less than
            cold_start_num, Metis will randomly sample hyper-parameters for the trial.
        '''
        self.samples_x = []
        self.samples_y = []
        self.samples_y_aggregation = []
        self.space = None
        self.no_resampling = no_resampling
        self.no_candidates = no_candidates
        self.optimize_mode = optimize_mode
        self.key_order = []
        self.cold_start_num = cold_start_num
        self.selection_num_starting_points = selection_num_starting_points
        self.minimize_constraints_fun = None
        self.minimize_starting_points = None

    def update_search_space(self, search_space):
        '''
        Update self.x_bounds and self.x_types from search_space.json
        '''
        self.x_bounds = [[] for i in range(len(search_space))]
        self.x_types = [NONE_TYPE for i in range(len(search_space))]

        for key in search_space:
            self.key_order.append(key)

        key_type = {}
        if isinstance(search_space, dict):
            for key in search_space:
                key_type = search_space[key]['_type']
                key_range = search_space[key]['_value']
                try:
                    idx = self.key_order.index(key)
                except Exception as ex:
                    logger.exception(ex)
                    raise RuntimeError("The search space contains a key that is not defined in key_order.")
                if key_type == 'quniform':
                    if key_range[2] == 1:
                        self.x_bounds[idx] = [key_range[0], key_range[1]]
                        self.x_types[idx] = 'range_int'
                    else:
                        bounds = []
                        for value in range(key_range[0], key_range[1], key_range[2]):
                            bounds.append(value)
                        self.x_bounds[idx] = bounds
                        self.x_types[idx] = 'discrete_int'
                elif key_type == 'randint':
                    self.x_bounds[idx] = [0, key_range[0]]
                    self.x_types[idx] = 'range_int'
                elif key_type == 'uniform':
                    self.x_bounds[idx] = [key_range[0], key_range[1]]
                    self.x_types[idx] = 'range_continuous'
                elif key_type == 'choice':
                    self.x_bounds[idx] = key_range
                    self.x_types[idx] = 'discrete_int'
                else:
                    logger.info("Metis Tuner doesn't support this kind of variable.")
                    raise RuntimeError("Metis Tuner doesn't support this kind of variable.")
        else:
            logger.info("The format of search space is not a dict.")
            raise RuntimeError("The format of search space is not a dict.")

        self.minimize_starting_points = _rand_init(self.x_bounds, self.x_types, \
                                                   self.selection_num_starting_points)

    def _pack_output(self, init_parameter):
        '''
        Pack the output
        '''
        output = {}
        for i, param in enumerate(init_parameter):
            output[self.key_order[i]] = param
        return output

    def generate_parameters(self, parameter_id):
        '''
        This function generates parameters for a trial.
        If the number of trial results is lower than the cold start number,
        Metis will first randomly generate some parameters.
        Otherwise, Metis will choose the parameters using the Gaussian Process Model and the Gaussian Mixture Model.
        '''
        if self.samples_x or len(self.samples_x) < self.cold_start_num:
            init_parameter = _rand_init(self.x_bounds, self.x_types, 1)[0]
            results = self._pack_output(init_parameter)
        else:
            results = self._selection(self.samples_x, self.samples_y_aggregation, self.samples_y,
                                      self.x_bounds, self.x_types,
                                      threshold_samplessize_resampling=(None if self.no_resampling is True else 50),
                                      no_candidates=self.no_candidates,
                                      minimize_starting_points=self.minimize_starting_points,
                                      minimize_constraints_fun=self.minimize_constraints_fun)

        logger.info("Generate parameters: %s", str(results))
        return results

    def receive_trial_result(self, parameter_id, parameters, value):
        '''
        Tuner receives a result from a trial.
        An example value: 99.5%
        '''
        value = self.extract_scalar_reward(value)
        if self.optimize_mode == OptimizeMode.Maximize:
            value = -value

        logger.info("Received trial result.")
        logger.info("value is: %s", str(value))
        logger.info("parameter is: %s", str(parameters))

        # parse parameter to sample_x
        sample_x = [0 for i in range(len(self.key_order))]
        for key in parameters:
            idx = self.key_order.index(key)
            sample_x[idx] = parameters[key]

        # parse value to sample_y
        temp_y = []
        if sample_x in self.samples_x:
            idx = self.samples_x.index(sample_x)
            temp_y = self.samples_y[idx]
            temp_y.append(value)
            self.samples_y[idx] = temp_y

            # calculate y aggregation
            median = get_median(temp_y)
            self.samples_y_aggregation[idx] = median
        else:
            self.samples_x.append(sample_x)
            self.samples_y.append([value])

            # calculate y aggregation
            self.samples_y_aggregation.append([value])

    def _selection(self, samples_x, samples_y_aggregation, samples_y,
                   x_bounds, x_types, max_resampling_per_x=3,
                   threshold_samplessize_exploitation=12,
                   threshold_samplessize_resampling=50, no_candidates=False,
                   minimize_starting_points=None, minimize_constraints_fun=None):

        next_candidate = None
        candidates = []
        samples_size_all = sum([len(i) for i in samples_y])
        samples_size_unique = len(samples_y)

        # ===== STEP 1: Compute the current optimum =====
        #sys.stderr.write("[%s] Predicting the optimal configuration from the current training dataset...\n" % (os.path.basename(__file__)))
        gp_model = gp_create_model.create_model(samples_x, samples_y_aggregation)
        lm_current = gp_selection.selection("lm", samples_y_aggregation,
                                            x_bounds, x_types, gp_model['model'],
                                            minimize_starting_points,
                                            minimize_constraints_fun=minimize_constraints_fun)
        if not lm_current:
            return None

        if no_candidates is False:
            candidates.append({'hyperparameter': lm_current['hyperparameter'],
                               'expected_mu': lm_current['expected_mu'],
                               'expected_sigma': lm_current['expected_sigma'],
                               'reason': "exploitation_gp"})

            # ===== STEP 2: Get recommended configurations for exploration =====
            #sys.stderr.write("[%s] Getting candidates for exploration...\n"
            #% \(os.path.basename(__file__)))
            results_exploration = gp_selection.selection("lc", samples_y_aggregation,
                                                         x_bounds, x_types, gp_model['model'],
                                                         minimize_starting_points,
                                                         minimize_constraints_fun=minimize_constraints_fun)

            if results_exploration is not None:
                if _num_past_samples(results_exploration['hyperparameter'], samples_x, samples_y) == 0:
                    candidates.append({'hyperparameter': results_exploration['hyperparameter'],
                                       'expected_mu': results_exploration['expected_mu'],
                                       'expected_sigma': results_exploration['expected_sigma'],
                                       'reason': "exploration"})
                    logger.info("DEBUG: 1 exploration candidate selected\n")
                    #sys.stderr.write("[%s] DEBUG: 1 exploration candidate selected\n" % (os.path.basename(__file__)))
            else:
                logger.info("DEBUG: No suitable exploration candidates were found")
                # sys.stderr.write("[%s] DEBUG: No suitable exploration candidates were \
                # found\n" % (os.path.basename(__file__)))

            # ===== STEP 3: Get recommended configurations for exploitation =====
            if samples_size_all >= threshold_samplessize_exploitation:
                #sys.stderr.write("[%s] Getting candidates for exploitation...\n" % (os.path.basename(__file__)))
                print("Getting candidates for exploitation...\n")
                try:
                    gmm = gmm_create_model.create_model(samples_x, samples_y_aggregation)
                    results_exploitation = gmm_selection.selection(x_bounds, x_types,
                                                                   gmm['clusteringmodel_good'],
                                                                   gmm['clusteringmodel_bad'],
                                                                   minimize_starting_points,
                                                                   minimize_constraints_fun=minimize_constraints_fun)

                    if results_exploitation is not None:
                        if _num_past_samples(results_exploitation['hyperparameter'], samples_x, samples_y) == 0:
                            candidates.append({'hyperparameter': results_exploitation['hyperparameter'], \
                                               'expected_mu': results_exploitation['expected_mu'], \
                                               'expected_sigma': results_exploitation['expected_sigma'], \
                                               'reason': "exploitation_gmm"})
                            logger.info("DEBUG: 1 exploitation_gmm candidate selected\n")
                    else:
                        logger.info("DEBUG: No suitable exploitation_gmm candidates were found\n")

                except ValueError as exception:
                    # The exception: ValueError: Fitting the mixture model failed
                    # because some components have ill-defined empirical covariance
                    # (for instance caused by singleton or collapsed samples).
                    # Try to decrease the number of components, or increase reg_covar.
                    logger.info("DEBUG: No suitable exploitation_gmm candidates were found due to exception.")
                    logger.info(exception)

            # ===== STEP 4: Get a list of outliers =====
            if (threshold_samplessize_resampling is not None) and \
               (samples_size_unique >= threshold_samplessize_resampling):
                logger.info("Getting candidates for re-sampling...\n")
                results_outliers = gp_outlier_detection.outlierDetection_threaded(samples_x, samples_y_aggregation)

                if results_outliers is not None:
                    temp = len(candidates)

                    for results_outlier in results_outliers:
                        if _num_past_samples(samples_x[results_outlier['samples_idx']], samples_x, samples_y) < max_resampling_per_x:
                            candidates.append({'hyperparameter': samples_x[results_outlier['samples_idx']], \
                                               'expected_mu': results_outlier['expected_mu'], \
                                               'expected_sigma': results_outlier['expected_sigma'], \
                                               'reason': "resampling"})
                    logger.info("DEBUG: %d re-sampling candidates selected\n")
                else:
                    logger.info("DEBUG: No suitable resampling candidates were found\n")

            if candidates:
                # ===== STEP 5: Compute the information gain of each candidate towards the optimum =====
                logger.info("Evaluating information gain of %d candidates...\n")
                next_improvement = 0

                threads_inputs = [[candidate, samples_x, samples_y, x_bounds, x_types,
                                   minimize_constraints_fun, minimize_starting_points]
                                  for candidate in candidates]
                threads_pool = ThreadPool(4)
                # Evaluate what would happen if we actually sample each candidate
                threads_results = threads_pool.map(_calculate_lowest_mu_threaded, threads_inputs)
                threads_pool.close()
                threads_pool.join()

                for threads_result in threads_results:
                    if threads_result['expected_lowest_mu'] < lm_current['expected_mu']:
                        # Information gain
                        temp_improvement = threads_result['expected_lowest_mu'] - lm_current['expected_mu']

                        if next_improvement > temp_improvement:
                            logger.info("DEBUG: \"next_candidate\" changed: "
                                        "lowest mu might reduce from %f (%s) to %f (%s), %s\n",
                                        lm_current['expected_mu'], str(lm_current['hyperparameter']),
                                        threads_result['expected_lowest_mu'],
                                        str(threads_result['candidate']['hyperparameter']),
                                        threads_result['candidate']['reason'])

                            next_improvement = temp_improvement
                            next_candidate = threads_result['candidate']
            else:
                # ===== STEP 6: If we have no candidates, randomly pick one =====
                logger.info("DEBUG: No candidates from exploration, exploitation, "
                            "and resampling. We will randomly pick a candidate for next_candidate\n")

                next_candidate = _rand_with_constraints(x_bounds, x_types) \
                    if minimize_starting_points is None else minimize_starting_points[0]
                next_candidate = lib_data.match_val_type(next_candidate, x_bounds, x_types)
                expected_mu, expected_sigma = gp_prediction.predict(next_candidate, gp_model['model'])
                next_candidate = {'hyperparameter': next_candidate, 'reason': "random",
                                  'expected_mu': expected_mu, 'expected_sigma': expected_sigma}

        outputs = self._pack_output(lm_current['hyperparameter'])
        return outputs


def _rand_with_constraints(x_bounds, x_types):
    outputs = None
    x_bounds_withconstraints = [x_bounds[i] for i in CONSTRAINT_PARAMS_IDX]
    x_types_withconstraints = [x_types[i] for i in CONSTRAINT_PARAMS_IDX]

    x_val_withconstraints = lib_constraint_summation.rand(x_bounds_withconstraints, \
                                                          x_types_withconstraints,
                                                          CONSTRAINT_LOWERBOUND, CONSTRAINT_UPPERBOUND)
    if not x_val_withconstraints:
        outputs = [None] * len(x_bounds)

        for i, _ in enumerate(CONSTRAINT_PARAMS_IDX):
            outputs[CONSTRAINT_PARAMS_IDX[i]] = x_val_withconstraints[i]

        for i, output in enumerate(outputs):
            if not output:
                outputs[i] = random.randint(x_bounds[i][0], x_bounds[i][1])
    return outputs


def _calculate_lowest_mu_threaded(inputs):
    [candidate, samples_x, samples_y, x_bounds, x_types,
     minimize_constraints_fun, minimize_starting_points] = inputs

    sys.stderr.write("[%s] Evaluating information gain of %s (%s)...\n" % \
                     (os.path.basename(__file__), candidate['hyperparameter'], candidate['reason']))
    outputs = {"candidate": candidate, "expected_lowest_mu": None}

    for expected_mu in [candidate['expected_mu'] + 1.96 * candidate['expected_sigma'],
                        candidate['expected_mu'] - 1.96 * candidate['expected_sigma']]:
        temp_samples_x = copy.deepcopy(samples_x)
        temp_samples_y = copy.deepcopy(samples_y)

        try:
            idx = temp_samples_x.index(candidate['hyperparameter'])
            # This handles the case of re-sampling a potential outlier
            temp_samples_y[idx].append(expected_mu)
        except ValueError:
            temp_samples_x.append(candidate['hyperparameter'])
            temp_samples_y.append([expected_mu])

        # Aggregates multiple observations of the same sampling points
        temp_y_aggregation = [statistics.median(temp_sample_y) for temp_sample_y in temp_samples_y]
        temp_gp = gp_create_model.create_model(temp_samples_x, temp_y_aggregation)
        temp_results = gp_selection.selection("lm", temp_y_aggregation,
                                              x_bounds, x_types, temp_gp['model'],
                                              minimize_starting_points,
                                              minimize_constraints_fun=minimize_constraints_fun)

        if outputs["expected_lowest_mu"] is None or outputs["expected_lowest_mu"] > temp_results['expected_mu']:
            outputs["expected_lowest_mu"] = temp_results['expected_mu']

    return outputs


def _num_past_samples(x, samples_x, samples_y):
    try:
        idx = samples_x.index(x)
        return len(samples_y[idx])
    except ValueError:
        logger.info("x not in sample_x")
        return 0


def _rand_init(x_bounds, x_types, selection_num_starting_points):
    '''
    Randomly sample some initial seeds within the bounds.
    '''
    return [lib_data.rand(x_bounds, x_types) for i \
            in range(0, selection_num_starting_points)]


def get_median(temp_list):
    '''
    Return the median
    '''
    num = len(temp_list)
    temp_list.sort()
    print(temp_list)

    if num % 2 == 0:
        median = (temp_list[int(num / 2)] + temp_list[int(num / 2) - 1]) / 2
    else:
        median = temp_list[int(num / 2)]
    return median
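A hedged sketch of the Tuner protocol this class implements (update_search_space, then alternating generate_parameters / receive_trial_result; not part of the commit). The search-space dict follows the search_space.json conventions handled in update_search_space; the objective is a placeholder, and the sketch assumes the Tuner base class supplies extract_scalar_reward for plain float metrics, as receive_trial_result above relies on. In a real experiment, nnictl and the NNI manager drive these calls rather than user code.

# Illustration of the call order only; values and the metric are placeholders.
from nni.metis_tuner.metis_tuner import MetisTuner

search_space = {
    "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]},
    "batch_size": {"_type": "choice", "_value": [16, 32, 64, 128]},
}

tuner = MetisTuner(optimize_mode="maximize", cold_start_num=2)
tuner.update_search_space(search_space)

for trial_id in range(4):
    params = tuner.generate_parameters(trial_id)     # dict keyed like search_space
    accuracy = 0.9 - params["learning_rate"]         # placeholder trial metric
    tuner.receive_trial_result(trial_id, params, accuracy)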
src/sdk/pynni/nni/metis_tuner/requirments.txt  (new file, mode 100644)

sklearn
\ No newline at end of file
src/sdk/pynni/requirements.txt

@@ -4,4 +4,7 @@ json_tricks
 # hyperopt tuner
 numpy
 scipy
-hyperopt
\ No newline at end of file
+hyperopt
+
+# metis tuner
+sklearn
src/webui/src/components/Overview.tsx

@@ -209,6 +209,10 @@ class Overview extends React.Component<{}, OverviewState> {
                         profile.failTrial += 1;
                         break;
+                    case 'RUNNING':
+                        profile.runTrial += 1;
+                        break;
                     case 'USER_CANCELED':
                     case 'SYS_CANCELED':
                         profile.stopTrial += 1;
 ...
src/webui/src/components/TrialsDetail.tsx

 import * as React from 'react';
 import axios from 'axios';
 import { MANAGER_IP } from '../static/const';
-import { Row, Col, Tabs, Input, Select } from 'antd';
+import { Row, Col, Tabs, Input, Select, Button } from 'antd';
 const Option = Select.Option;
-import { TableObj, Parameters, DetailAccurPoint, TooltipForAccuracy } from '../static/interface';
-import { getFinalResult } from '../static/function';
+import { TableObjFianl, Parameters, DetailAccurPoint, TooltipForAccuracy } from '../static/interface';
+import { getFinalResult, getFinal } from '../static/function';
 import Accuracy from './overview/Accuracy';
 import Duration from './trial-detail/Duration';
 import Title1 from './overview/Title1';

@@ -16,8 +16,8 @@ import '../static/style/trialsDetail.scss';
 interface TrialDetailState {
     accSource: object;
     accNodata: string;
-    tableListSource: Array<TableObj>;
-    searchResultSource: Array<TableObj>;
+    tableListSource: Array<TableObjFianl>;
+    searchResultSource: Array<TableObjFianl>;
     isHasSearch: boolean;
     experimentStatus: string;
     entriesTable: number;

@@ -30,6 +30,8 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
     public interTableList = 1;
     public interAllTableList = 2;
+    public tableList: TableList | null;
+
     constructor(props: {}) {
         super(props);

@@ -40,7 +42,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
             searchResultSource: [],
             experimentStatus: '',
             entriesTable: 20,
-            isHasSearch: false
+            isHasSearch: false,
         };
     }
     // trial accuracy graph

@@ -132,7 +134,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
             .then(res => {
                 if (res.status === 200) {
                     const trialJobs = res.data;
-                    const trialTable: Array<TableObj> = [];
+                    const trialTable: Array<TableObjFianl> = [];
                     Object.keys(trialJobs).map(item => {
                         // only succeeded trials have finalMetricData
                         let desc: Parameters = {

@@ -167,7 +169,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
                         if (trialJobs[item].logPath !== undefined) {
                             desc.logPath = trialJobs[item].logPath;
                         }
-                        const acc = getFinalResult(trialJobs[item].finalMetricData);
+                        const acc = getFinal(trialJobs[item].finalMetricData);
                         trialTable.push({
                             key: trialTable.length,
                             sequenceId: trialJobs[item].sequenceId,

@@ -185,7 +187,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
                     Object.keys(searchResultSource).map(index => {
                         temp.push(searchResultSource[index].id);
                     });
-                    const searchResultList: Array<TableObj> = [];
+                    const searchResultList: Array<TableObjFianl> = [];
                     for (let i = 0; i < temp.length; i++) {
                         Object.keys(trialTable).map(key => {
                             const item = trialTable[key];

@@ -217,7 +219,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
             .then(res => {
                 if (res.status === 200) {
                     const trialJobs = res.data;
-                    const trialTable: Array<TableObj> = [];
+                    const trialTable: Array<TableObjFianl> = [];
                     Object.keys(trialJobs).map(item => {
                         // only succeeded trials have finalMetricData
                         let desc: Parameters = {

@@ -252,7 +254,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
                         if (trialJobs[item].logPath !== undefined) {
                             desc.logPath = trialJobs[item].logPath;
                         }
-                        const acc = getFinalResult(trialJobs[item].finalMetricData);
+                        const acc = getFinal(trialJobs[item].finalMetricData);
                         trialTable.push({
                             key: trialTable.length,
                             sequenceId: trialJobs[item].sequenceId,

@@ -308,7 +310,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
         } else {
             window.clearInterval(this.interAllTableList);
             const { tableListSource } = this.state;
-            const searchResultList: Array<TableObj> = [];
+            const searchResultList: Array<TableObjFianl> = [];
             Object.keys(tableListSource).map(key => {
                 const item = tableListSource[key];
                 if (item.sequenceId.toString() === targetValue || item.id.includes(targetValue)) {

@@ -364,6 +366,10 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
         }
     }
 
+    test = () => {
+        alert('TableList component was not properly initialized.');
+    }
+
     componentDidMount() {
         this._isMounted = true;

@@ -429,13 +435,26 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
                         <span>entries</span>
                     </Col>
                     <Col span={12} className="right">
-                        {/* <span>Search:</span> */}
-                        <Input
-                            type="text"
-                            placeholder="search by Trial No. and id"
-                            onChange={this.searchTrial}
-                            style={{ width: 200, marginLeft: 6 }}
-                        />
+                        <Row>
+                            <Col span={12}>
+                                <Button
+                                    type="primary"
+                                    className="tableButton editStyle"
+                                    onClick={this.tableList ? this.tableList.addColumn : this.test}
+                                >
+                                    AddColumn
+                                </Button>
+                            </Col>
+                            <Col span={12}>
+                                {/* <span>Search:</span> */}
+                                <Input
+                                    type="text"
+                                    placeholder="search by Trial No. and id"
+                                    onChange={this.searchTrial}
+                                    style={{ width: 200, marginLeft: 6 }}
+                                />
+                            </Col>
+                        </Row>
                     </Col>
                 </Row>
                 <TableList

@@ -444,6 +463,7 @@ class TrialsDetail extends React.Component<{}, TrialDetailState> {
                     updateList={this.drawTableList}
                     searchResult={searchResultSource}
                     isHasSearch={isHasSearch}
+                    ref={(tabList) => this.tableList = tabList}
                 />
             </div>
         );
 ...
src/webui/src/components/overview/Progress.tsx

@@ -242,45 +242,45 @@ class Progressed extends React.Component<ProgressProps, ProgressState> {
                     maxString={`MaxTrialNumber: ${trialProfile.MaxTrialNum}`}
                 />
                 <Row className="basic colorOfbasic mess">
-                    <p>Best Default Metric</p>
+                    <p>best metric</p>
                     <div>{bestAccuracy}</div>
                 </Row>
                 <Row className="mess">
                     <Col span={8}>
                         <Row className="basic colorOfbasic">
-                            <p>Time Spent</p>
+                            <p>spent</p>
                             <div>{convertTime(trialProfile.execDuration)}</div>
                         </Row>
                     </Col>
                     <Col span={9}>
                         <Row className="basic colorOfbasic">
-                            <p>Remaining Time</p>
+                            <p>remaining</p>
                             <div>{remaining}</div>
                         </Row>
                     </Col>
                     <Col span={7}>
                         <Row className="basic colorOfbasic">
-                            <p>MaxDuration</p>
-                            <div>{convertTime(trialProfile.maxDuration)}</div>
+                            <p>running</p>
+                            <div>{trialNumber.runTrial}</div>
                         </Row>
                     </Col>
                 </Row>
                 <Row className="mess">
                     <Col span={8}>
                         <Row className="basic colorOfbasic">
-                            <p>Succeed Trial</p>
+                            <p>succeed</p>
                             <div>{trialNumber.succTrial}</div>
                         </Row>
                     </Col>
                     <Col span={9}>
                         <Row className="basic">
-                            <p>Stopped Trial</p>
+                            <p>stopped</p>
                             <div>{trialNumber.stopTrial}</div>
                         </Row>
                     </Col>
                     <Col span={7}>
                         <Row className="basic">
-                            <p>Failed Trial</p>
+                            <p>failed</p>
                             <div>{trialNumber.failTrial}</div>
                         </Row>
                     </Col>
 ...
src/webui/src/components/trial-detail/TableList.tsx
View file @
85cb472e
...
@@ -2,11 +2,13 @@ import * as React from 'react';
...
@@ -2,11 +2,13 @@ import * as React from 'react';
import
axios
from
'
axios
'
;
import
axios
from
'
axios
'
;
import
JSONTree
from
'
react-json-tree
'
;
import
JSONTree
from
'
react-json-tree
'
;
import
ReactEcharts
from
'
echarts-for-react
'
;
import
ReactEcharts
from
'
echarts-for-react
'
;
import
{
Row
,
Table
,
Button
,
Popconfirm
,
Modal
,
message
}
from
'
antd
'
;
import
{
Row
,
Table
,
Button
,
Popconfirm
,
Modal
,
message
,
Checkbox
}
from
'
antd
'
;
import
{
MANAGER_IP
,
trialJobStatus
}
from
'
../../static/const
'
;
const
CheckboxGroup
=
Checkbox
.
Group
;
import
{
MANAGER_IP
,
trialJobStatus
,
COLUMN
,
COLUMN_INDEX
}
from
'
../../static/const
'
;
import
{
convertDuration
}
from
'
../../static/function
'
;
import
{
convertDuration
}
from
'
../../static/function
'
;
import
{
TableObj
,
TrialJob
}
from
'
../../static/interface
'
;
import
{
TableObj
Fianl
,
TrialJob
}
from
'
../../static/interface
'
;
import
LogPath
from
'
../logPath/LogPath
'
;
import
LogPath
from
'
../logPath/LogPath
'
;
import
'
../../static/style/search.scss
'
;
require
(
'
../../static/style/tableStatus.css
'
);
require
(
'
../../static/style/tableStatus.css
'
);
require
(
'
../../static/style/logPath.scss
'
);
require
(
'
../../static/style/logPath.scss
'
);
require
(
'
../../static/style/search.scss
'
);
require
(
'
../../static/style/search.scss
'
);
...
@@ -22,8 +24,8 @@ echarts.registerTheme('my_theme', {
...
@@ -22,8 +24,8 @@ echarts.registerTheme('my_theme', {
interface
TableListProps
{
interface
TableListProps
{
entries
:
number
;
entries
:
number
;
tableSource
:
Array
<
TableObj
>
;
tableSource
:
Array
<
TableObj
Fianl
>
;
searchResult
:
Array
<
TableObj
>
;
searchResult
:
Array
<
TableObj
Fianl
>
;
updateList
:
Function
;
updateList
:
Function
;
isHasSearch
:
boolean
;
isHasSearch
:
boolean
;
}
}
...
@@ -31,6 +33,14 @@ interface TableListProps {
...
@@ -31,6 +33,14 @@ interface TableListProps {
interface
TableListState
{
interface
TableListState
{
intermediateOption
:
object
;
intermediateOption
:
object
;
modalVisible
:
boolean
;
modalVisible
:
boolean
;
isObjFinal
:
boolean
;
isShowColumn
:
boolean
;
columnSelected
:
Array
<
string
>
;
// user select columnKeys
}
interface
ColumnIndex
{
name
:
string
;
index
:
number
;
}
}
class
TableList
extends
React
.
Component
<
TableListProps
,
TableListState
>
{
class
TableList
extends
React
.
Component
<
TableListProps
,
TableListState
>
{
...
@@ -41,7 +51,10 @@ class TableList extends React.Component<TableListProps, TableListState> {
...
@@ -41,7 +51,10 @@ class TableList extends React.Component<TableListProps, TableListState> {
this
.
state
=
{
this
.
state
=
{
intermediateOption
:
{},
intermediateOption
:
{},
modalVisible
:
false
modalVisible
:
false
,
isObjFinal
:
false
,
isShowColumn
:
false
,
columnSelected
:
COLUMN
,
};
};
}
}
...
@@ -79,6 +92,14 @@ class TableList extends React.Component<TableListProps, TableListState> {
         }
     }
 
+    hideShowColumnModal = () => {
+        if (this._isMounted) {
+            this.setState({ isShowColumn: false });
+        }
+    }
+
     intermediateGraphOption = (intermediateArr: number[], id: string) => {
         const sequence: number[] = [];
         const lengthInter = intermediateArr.length;
...
@@ -143,6 +164,67 @@ class TableList extends React.Component<TableListProps, TableListState> {
         });
     }
 
+    // click add column btn, just show the modal of addcolumn
+    addColumn = () => {
+        // show user select check button
+        if (this._isMounted) {
+            this.setState({ isShowColumn: true });
+        }
+    }
+
+    // checkbox for coloumn
+    selectedColumn = (checkedValues: Array<string>) => {
+        let count = 6;
+        const want: Array<object> = [];
+        const finalKeys: Array<string> = [];
+        const wantResult: Array<string> = [];
+        Object.keys(checkedValues).map(m => {
+            switch (checkedValues[m]) {
+                case 'Trial No':
+                case 'id':
+                case 'duration':
+                case 'status':
+                case 'Operation':
+                case 'Default':
+                case 'Intermediate Result':
+                    break;
+                default:
+                    finalKeys.push(checkedValues[m]);
+            }
+        });
+
+        Object.keys(finalKeys).map(n => {
+            want.push({
+                name: finalKeys[n],
+                index: count++
+            });
+        });
+
+        Object.keys(checkedValues).map(item => {
+            const temp = checkedValues[item];
+            Object.keys(COLUMN_INDEX).map(key => {
+                const index = COLUMN_INDEX[key];
+                if (index.name === temp) {
+                    want.push(index);
+                }
+            });
+        });
+
+        want.sort((a: ColumnIndex, b: ColumnIndex) => {
+            return a.index - b.index;
+        });
+
+        Object.keys(want).map(i => {
+            wantResult.push(want[i].name);
+        });
+
+        if (this._isMounted) {
+            this.setState(() => ({ columnSelected: wantResult }));
+        }
+    }
+
     componentDidMount() {
         this._isMounted = true;
     }
...
@@ -154,7 +236,27 @@ class TableList extends React.Component<TableListProps, TableListState> {
     render() {
         const { entries, tableSource, searchResult, isHasSearch } = this.props;
-        const { intermediateOption, modalVisible } = this.state;
+        const { intermediateOption, modalVisible, isShowColumn, columnSelected, } = this.state;
+        let showTitle = COLUMN;
+        if (tableSource.length >= 1) {
+            const temp = tableSource[0].acc;
+            if (temp !== undefined && typeof temp === 'object') {
+                if (this._isMounted) {
+                    // concat default column and finalkeys
+                    const item = Object.keys(temp);
+                    const want: Array<string> = [];
+                    Object.keys(item).map(key => {
+                        if (item[key] !== 'default') {
+                            want.push(item[key]);
+                        }
+                    });
+                    showTitle = COLUMN.concat(want);
+                }
+            }
+        }
         let bgColor = '';
         const trialJob: Array<TrialJob> = [];
         trialJobStatus.map(item => {
...
@@ -163,146 +265,202 @@ class TableList extends React.Component<TableListProps, TableListState> {
                 value: item
             });
         });
-        const columns = [{
-            title: 'Trial No.',
-            dataIndex: 'sequenceId',
-            key: 'sequenceId',
-            width: 120,
-            className: 'tableHead',
-            sorter: (a: TableObj, b: TableObj) => (a.sequenceId as number) - (b.sequenceId as number)
-        }, {
-            title: 'Id',
-            dataIndex: 'id',
-            key: 'id',
-            width: 60,
-            className: 'tableHead idtitle',
-            // the sort of string
-            sorter: (a: TableObj, b: TableObj): number => a.id.localeCompare(b.id),
-            render: (text: string, record: TableObj) => {
-                return (
-                    <div>{record.id}</div>
-                );
-            }
-        }, {
-            title: 'Duration',
-            dataIndex: 'duration',
-            key: 'duration',
-            width: 140,
-            // the sort of number
-            sorter: (a: TableObj, b: TableObj) => (a.duration as number) - (b.duration as number),
-            render: (text: string, record: TableObj) => {
-                let duration;
-                if (record.duration !== undefined && record.duration > 0) {
-                    duration = convertDuration(record.duration);
-                } else {
-                    duration = 0;
-                }
-                return (
-                    <div className="durationsty"><div>{duration}</div></div>
-                );
-            },
-        }, {
-            title: 'Status',
-            dataIndex: 'status',
-            key: 'status',
-            width: 150,
-            className: 'tableStatus',
-            render: (text: string, record: TableObj) => {
-                bgColor = record.status;
-                return (
-                    <span className={`${bgColor} commonStyle`}>{record.status}</span>
-                );
-            },
-            filters: trialJob,
-            onFilter: (value: string, record: TableObj) => record.status.indexOf(value) === 0,
-            sorter: (a: TableObj, b: TableObj): number => a.status.localeCompare(b.status)
-        }, {
-            title: 'Default Metric',
-            dataIndex: 'acc',
-            key: 'acc',
-            width: 200,
-            sorter: (a: TableObj, b: TableObj) => (a.acc as number) - (b.acc as number),
-            render: (text: string, record: TableObj) => {
-                const accuracy = record.acc;
-                let wei = 0;
-                if (accuracy) {
-                    if (accuracy.toString().indexOf('.') !== -1) {
-                        wei = accuracy.toString().length - accuracy.toString().indexOf('.') - 1;
-                    }
-                }
-                return (
-                    <div>{record.acc ? (wei > 6 ? record.acc.toFixed(6) : record.acc) : '--'}</div>
-                );
-            }
-        }, {
-            title: 'Operation',
-            dataIndex: 'operation',
-            key: 'operation',
-            width: 90,
-            render: (text: string, record: TableObj) => {
-                let trialStatus = record.status;
-                let flagKill = false;
-                if (trialStatus === 'RUNNING') {
-                    flagKill = true;
-                } else {
-                    flagKill = false;
-                }
-                return (
-                    flagKill
-                        ? (
-                            <Popconfirm
-                                title="Are you sure to cancel this trial?"
-                                onConfirm={this.killJob.bind(this, record.key, record.id, record.status)}
-                            >
-                                <Button type="primary" className="tableButton">Kill</Button>
-                            </Popconfirm>
-                        )
-                        : (
-                            <Button type="primary" className="tableButton" disabled={true}>Kill</Button>
-                        )
-                );
-            },
-        }, {
-            title: 'Intermediate Result',
-            dataIndex: 'intermediate',
-            key: 'intermediate',
-            width: '16%',
-            render: (text: string, record: TableObj) => {
-                return (
-                    <Button
-                        type="primary"
-                        className="tableButton"
-                        onClick={this.showIntermediateModal.bind(this, record.id)}
-                    >
-                        Intermediate
-                    </Button>
-                );
-            },
-        }];
+        const showColumn: Array<object> = [];
+        Object.keys(columnSelected).map(key => {
+            const item = columnSelected[key];
+            switch (item) {
+                case 'Trial No':
+                    showColumn.push({
+                        title: 'Trial No.',
+                        dataIndex: 'sequenceId',
+                        key: 'sequenceId',
+                        width: 120,
+                        className: 'tableHead',
+                        sorter: (a: TableObjFianl, b: TableObjFianl) =>
+                            (a.sequenceId as number) - (b.sequenceId as number)
+                    });
+                    break;
+                case 'id':
+                    showColumn.push({
+                        title: 'Id',
+                        dataIndex: 'id',
+                        key: 'id',
+                        width: 60,
+                        className: 'tableHead idtitle',
+                        // the sort of string
+                        sorter: (a: TableObjFianl, b: TableObjFianl): number => a.id.localeCompare(b.id),
+                        render: (text: string, record: TableObjFianl) => {
+                            return (
+                                <div>{record.id}</div>
+                            );
+                        }
+                    });
+                    break;
+                case 'duration':
+                    showColumn.push({
+                        title: 'Duration',
+                        dataIndex: 'duration',
+                        key: 'duration',
+                        width: 140,
+                        // the sort of number
+                        sorter: (a: TableObjFianl, b: TableObjFianl) => (a.duration as number) - (b.duration as number),
+                        render: (text: string, record: TableObjFianl) => {
+                            let duration;
+                            if (record.duration !== undefined && record.duration > 0) {
+                                duration = convertDuration(record.duration);
+                            } else {
+                                duration = 0;
+                            }
+                            return (
+                                <div className="durationsty"><div>{duration}</div></div>
+                            );
+                        },
+                    });
+                    break;
+                case 'status':
+                    showColumn.push({
+                        title: 'Status',
+                        dataIndex: 'status',
+                        key: 'status',
+                        width: 150,
+                        className: 'tableStatus',
+                        render: (text: string, record: TableObjFianl) => {
+                            bgColor = record.status;
+                            return (
+                                <span className={`${bgColor} commonStyle`}>{record.status}</span>
+                            );
+                        },
+                        filters: trialJob,
+                        onFilter: (value: string, record: TableObjFianl) => record.status.indexOf(value) === 0,
+                        sorter: (a: TableObjFianl, b: TableObjFianl): number => a.status.localeCompare(b.status)
+                    });
+                    break;
+                case 'Default':
+                    showColumn.push({
+                        title: 'Default Metric',
+                        dataIndex: 'acc',
+                        key: 'acc',
+                        width: 200,
+                        sorter: (a: TableObjFianl, b: TableObjFianl) => {
+                            if (a.acc !== undefined && b.acc !== undefined) {
+                                return JSON.parse(a.acc.default) - JSON.parse(b.acc.default);
+                            } else {
+                                return NaN;
+                            }
+                        },
+                        render: (text: string, record: TableObjFianl) => {
+                            let accuracy;
+                            if (record.acc !== undefined) {
+                                accuracy = record.acc.default;
+                            }
+                            let wei = 0;
+                            if (accuracy) {
+                                if (accuracy.toString().indexOf('.') !== -1) {
+                                    wei = accuracy.toString().length - accuracy.toString().indexOf('.') - 1;
+                                }
+                            }
+                            return (
+                                <div>
+                                    {record.acc && record.acc.default
+                                        ? (wei > 6 ? JSON.parse(record.acc.default).toFixed(6) : record.acc.default)
+                                        : '--'}
+                                </div>
+                            );
+                        }
+                    });
+                    break;
+                case 'Operation':
+                    showColumn.push({
+                        title: 'Operation',
+                        dataIndex: 'operation',
+                        key: 'operation',
+                        width: 90,
+                        render: (text: string, record: TableObjFianl) => {
+                            let trialStatus = record.status;
+                            let flagKill = false;
+                            if (trialStatus === 'RUNNING') {
+                                flagKill = true;
+                            } else {
+                                flagKill = false;
+                            }
+                            return (
+                                flagKill
+                                    ? (
+                                        <Popconfirm
+                                            title="Are you sure to cancel this trial?"
+                                            onConfirm={this.killJob.bind(this, record.key, record.id, record.status)}
+                                        >
+                                            <Button type="primary" className="tableButton">Kill</Button>
+                                        </Popconfirm>
+                                    )
+                                    : (
+                                        <Button type="primary" className="tableButton" disabled={true}>Kill</Button>
+                                    )
+                            );
+                        },
+                    });
+                    break;
+                case 'Intermediate Result':
+                    showColumn.push({
+                        title: 'Intermediate Result',
+                        dataIndex: 'intermediate',
+                        key: 'intermediate',
+                        width: '16%',
+                        render: (text: string, record: TableObjFianl) => {
+                            return (
+                                <Button
+                                    type="primary"
+                                    className="tableButton"
+                                    onClick={this.showIntermediateModal.bind(this, record.id)}
+                                >
+                                    Intermediate
+                                </Button>
+                            );
+                        },
+                    });
+                    break;
+                default:
+                    showColumn.push({
+                        title: item,
+                        dataIndex: item,
+                        key: item,
+                        width: 150,
+                        render: (text: string, record: TableObjFianl) => {
+                            return (
+                                <div>{record.acc ? record.acc[item] : '--'}</div>
+                            );
+                        }
+                    });
+            }
+        });
 
-        const openRow = (record: TableObj) => {
+        const openRow = (record: TableObjFianl) => {
             let isHasParameters = true;
             if (record.description.parameters.error) {
                 isHasParameters = false;
...
@@ -341,12 +499,13 @@ class TableList extends React.Component<TableListProps, TableListState> {
             <Row className="tableList">
                 <div id="tableList">
                     <Table
-                        columns={columns}
+                        columns={showColumn}
                         expandedRowRender={openRow}
                         dataSource={isHasSearch ? searchResult : tableSource}
                         className="commonTableStyle"
                         pagination={{ pageSize: entries }}
                     />
+                    {/* Intermediate Result Modal */}
                     <Modal
                         title="Intermediate Result"
                         visible={modalVisible}
...
@@ -365,6 +524,22 @@ class TableList extends React.Component<TableListProps, TableListState> {
                         />
                     </Modal>
                 </div>
+                {/* Add Column Modal */}
+                <Modal
+                    title="Table Title"
+                    visible={isShowColumn}
+                    onCancel={this.hideShowColumnModal}
+                    footer={null}
+                    destroyOnClose={true}
+                    width="40%"
+                >
+                    <CheckboxGroup
+                        options={showTitle}
+                        defaultValue={columnSelected}
+                        onChange={this.selectedColumn}
+                        className="titleColumn"
+                    />
+                </Modal>
             </Row>
         );
     }
...
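A side note on the Default Metric cell above: it only truncates values that carry more than six decimal places. A minimal standalone sketch of that rule (the function name and sample value below are illustrative, not part of the commit):

// Sketch of the truncation rule used by the Default Metric column:
// show at most six decimal places, otherwise print the value as-is.
function formatDefaultMetric(value: number | string | undefined): string {
    if (value === undefined) {
        return '--';
    }
    const text = value.toString();
    const dot = text.indexOf('.');
    const decimals = dot === -1 ? 0 : text.length - dot - 1;
    return decimals > 6 ? Number(text).toFixed(6) : text;
}

// Example: formatDefaultMetric(0.123456789) returns '0.123457'; formatDefaultMetric(undefined) returns '--'.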
src/webui/src/static/const.ts View file @ 85cb472e
-export const MANAGER_IP = `/api/v1/nni`;
-export const DOWNLOAD_IP = `/logs`;
-export const trialJobStatus = [
+const MANAGER_IP = `/api/v1/nni`;
+const DOWNLOAD_IP = `/logs`;
+const trialJobStatus = [
     'UNKNOWN',
     'WAITING',
     'RUNNING',
...
@@ -10,12 +10,47 @@ export const trialJobStatus = [
     'SYS_CANCELED',
     'EARLY_STOPPED'
 ];
-export const CONTROLTYPE = [
+const CONTROLTYPE = [
     'SEARCH_SPACE',
     'TRIAL_CONCURRENCY',
     'MAX_EXEC_DURATION'
 ];
-export const MONACO = {
+const MONACO = {
     readOnly: true,
     automaticLayout: true
 };
+
+const COLUMN_INDEX = [
+    { name: 'Trial No', index: 1 },
+    { name: 'id', index: 2 },
+    { name: 'duration', index: 3 },
+    { name: 'status', index: 4 },
+    { name: 'Default', index: 5 },
+    { name: 'Operation', index: 10000 },
+    { name: 'Intermediate Result', index: 10001 }
+];
+
+const COLUMN = ['Trial No', 'id', 'duration', 'status', 'Default', 'Operation', 'Intermediate Result'];
+
+export {
+    MANAGER_IP, DOWNLOAD_IP, trialJobStatus,
+    CONTROLTYPE, MONACO, COLUMN, COLUMN_INDEX
+};
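Roughly how these constants fit together: COLUMN is the default set of visible columns, and COLUMN_INDEX pins each fixed column to a position so that user-selected metric columns can be slotted in between 'Default' and 'Operation'. A small ordering sketch under that assumption (orderColumns and the local FIXED copy are illustrative helpers, not exports of const.ts):

interface ColumnIndex { name: string; index: number; }

// Local copy of the fixed-column ordering, for illustration only.
const FIXED: ColumnIndex[] = [
    { name: 'Trial No', index: 1 },
    { name: 'id', index: 2 },
    { name: 'duration', index: 3 },
    { name: 'status', index: 4 },
    { name: 'Default', index: 5 },
    { name: 'Operation', index: 10000 },
    { name: 'Intermediate Result', index: 10001 }
];

// Order checked column names: fixed columns keep their table index,
// extra metric keys are placed after the default columns (index 6, 7, ...).
function orderColumns(checked: string[]): string[] {
    let next = 6;
    const picked: ColumnIndex[] = [];
    for (const name of checked) {
        const fixed = FIXED.find(c => c.name === name);
        picked.push(fixed !== undefined ? fixed : { name, index: next++ });
    }
    return picked.sort((a, b) => a.index - b.index).map(c => c.name);
}

// Example: orderColumns(['Operation', 'id', 'loss']) yields ['id', 'loss', 'Operation'].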
src/webui/src/static/function.ts View file @ 85cb472e
-import { FinalResult } from './interface';
+import { FinalResult, FinalType } from './interface';
 const convertTime = (num: number) => {
     if (num % 3600 === 0) {
...
@@ -28,6 +28,7 @@ const convertDuration = (num: number) => {
 };
 // get final result value
+// draw Accuracy point graph
 const getFinalResult = (final: FinalResult) => {
     let acc;
     let showDefault = 0;
...
@@ -46,6 +47,21 @@ const getFinalResult = (final: FinalResult) => {
     }
 };
+
+// get final result value // acc obj
+const getFinal = (final: FinalResult) => {
+    let showDefault: FinalType;
+    if (final) {
+        showDefault = JSON.parse(final[0].data);
+        if (typeof showDefault === 'number') {
+            showDefault = { default: showDefault };
+        }
+        return showDefault;
+    } else {
+        return undefined;
+    }
+};
 export {
-    convertTime, convertDuration, getFinalResult
+    convertTime, convertDuration, getFinalResult, getFinal
 };
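In effect, getFinal parses the first reported final result and, when a trial reported a bare number, wraps it so callers can always read a default field. A self-contained sketch of the same normalization (MetricRecord, FinalMetric, and normalizeFinal are stand-in names, not the FinalResult/FinalType types from interface.ts):

interface MetricRecord { data: string; }   // stand-in for one final-result entry

type FinalMetric = { default: number | string; [key: string]: number | string };

// Parse the first final result; wrap bare numbers as { default: value }.
function normalizeFinal(final?: MetricRecord[]): FinalMetric | undefined {
    if (!final || final.length === 0) {
        return undefined;
    }
    const parsed = JSON.parse(final[0].data);
    return typeof parsed === 'number' ? { default: parsed } : parsed;
}

// Example: normalizeFinal([{ data: '0.97' }]) yields { default: 0.97 };
// normalizeFinal([{ data: '{"default": 0.97, "loss": 0.1}' }]) yields the parsed object unchanged.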
src/webui/src/static/interface.ts View file @ 85cb472e
 // draw accuracy graph data interface
 interface TableObj {
     key: number;
     sequenceId: number;
     id: string;
     duration: number;
     status: string;
     acc?: number;
+    // draw accuracy graph
     description: Parameters;
     color?: string;
 }
+
+interface TableObjFianl {
+    key: number;
+    sequenceId: number;
+    id: string;
+    duration: number;
+    status: string;
+    acc?: FinalType;
+    description: Parameters;
+    color?: string;
+}
+
+interface FinalType {
+    default: string;
+}
 
 interface ErrorParameter {
     error?: string;
 }
 
 interface Parameters {
     parameters: ErrorParameter;
     logPath?: string;
...
@@ -93,5 +111,6 @@ export {
     TableObj, Parameters, Experiment,
     AccurPoint, TrialNumber, TrialJob,
     DetailAccurPoint, TooltipForAccuracy,
-    ParaObj, VisualMapValue, Dimobj, FinalResult
+    ParaObj, VisualMapValue, Dimobj, FinalResult, TableObjFianl, FinalType
 };
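The practical difference between the two row shapes: TableObj keeps acc as a bare number, while TableObjFianl carries the parsed metric object, so table cells read acc.default or any user-defined metric key. A hedged illustration with made-up values:

// Hypothetical row in the new shape; only the acc field differs from TableObj.
const row = {
    key: 0,
    sequenceId: 0,
    id: 'Ab3dF',
    duration: 120,
    status: 'SUCCEEDED',
    acc: { default: '0.973', loss: '0.081' },   // object instead of a bare number
    description: { parameters: {} }
};

// The Default Metric column reads acc.default; extra columns index by their key.
const defaultMetric = row.acc !== undefined ? row.acc.default : '--';
const lossCell = row.acc !== undefined ? row.acc['loss'] : '--';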
src/webui/src/static/style/search.scss View file @ 85cb472e
+/* some buttons about trial-detail table */
 .allList {
     width: 96%;
     margin: 0 auto;
...
@@ -17,4 +18,17 @@
     }
 }
+
+.titleColumn {
+    .ant-checkbox-group-item {
+        display: block;
+    }
+}
+
+.applyfooter {
+    /* apply button style */
+    .apply {
+        text-align: right;
+    }
+}
tools/nni_cmd/config_schema.py View file @ 85cb472e
...
@@ -68,6 +68,16 @@ Optional('tuner'): Or({
         Optional('n_output_node'): int,
     },
     Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
+},{
+    'builtinTunerName': 'MetisTuner',
+    'classArgs': {
+        Optional('optimize_mode'): Or('maximize', 'minimize'),
+        Optional('no_resampling'): bool,
+        Optional('no_candidates'): bool,
+        Optional('selection_num_starting_points'): int,
+        Optional('cold_start_num'): int,
+    },
+    Optional('gpuNum'): And(int, lambda x: 0 <= x <= 99999),
 },{
     'codeDir': os.path.exists,
     'classFileName': str,
...
tools/nni_cmd/launcher_utils.py View file @ 85cb472e
...
@@ -21,7 +21,7 @@
 import os
 import json
 from .config_schema import LOCAL_CONFIG_SCHEMA, REMOTE_CONFIG_SCHEMA, PAI_CONFIG_SCHEMA, KUBEFLOW_CONFIG_SCHEMA, FRAMEWORKCONTROLLER_CONFIG_SCHEMA
-from .common_utils import get_json_content, print_error, print_warning
+from .common_utils import get_json_content, print_error, print_warning, print_normal
 
 def expand_path(experiment_config, key):
     '''Change '~' to user home directory'''
...
@@ -32,7 +32,7 @@ def parse_relative_path(root_path, experiment_config, key):
     '''Change relative path to absolute path'''
     if experiment_config.get(key) and not os.path.isabs(experiment_config.get(key)):
         absolute_path = os.path.join(root_path, experiment_config.get(key))
-        print_warning('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
+        print_normal('expand %s: %s to %s ' % (key, experiment_config[key], absolute_path))
         experiment_config[key] = absolute_path
 
 def parse_time(experiment_config):
...
tools/nni_trial_tool/log_utils.py View file @ 85cb472e
...
@@ -129,13 +129,15 @@ class PipeLogReader(threading.Thread):
         self.pipeReader = os.fdopen(self.fdRead)
         self.orig_stdout = sys.__stdout__
         self._is_read_completed = False
+        self.process_exit = False
 
         def _populateQueue(stream, queue):
             '''
             Collect lines from 'stream' and put them in 'quque'.
             '''
             time.sleep(5)
             while True:
+                cur_process_exit = self.process_exit
                 try:
                     line = self.queue.get(True, 5)
                     try:
...
@@ -144,9 +146,10 @@ class PipeLogReader(threading.Thread):
                         self.orig_stdout.flush()
                     except Exception as e:
                         pass
                 except Exception as e:
-                    self._is_read_completed = True
-                    break
+                    if cur_process_exit == True:
+                        self._is_read_completed = True
+                        break
 
         self.pip_log_reader_thread = threading.Thread(target=_populateQueue,
             args=(self.pipeReader, self.queue))
...
@@ -175,4 +178,8 @@ class PipeLogReader(threading.Thread):
     def is_read_completed(self):
         """Return if read is completed
         """
-        return self._is_read_completed
\ No newline at end of file
+        return self._is_read_completed
+
+    def set_process_exit(self):
+        self.process_exit = True
+        return self.process_exit
\ No newline at end of file