OpenDAS / nni — Commit 8314d6ee
Authored Sep 07, 2018 by Deshui Yu; committed by fishyds, Sep 07, 2018

Merge from dogfood branch to master

Parent: 98530fd2
Changes: 103 changed files in total; showing 20 changed files with 1904 additions and 39 deletions (+1904 -39).
test/naive/run.py                                             +9   -7
tools/nni_annotation/README.md                                +55  -0
tools/nni_annotation/__init__.py                              +104 -0
tools/nni_annotation/code_generator.py                        +240 -0
tools/nni_annotation/examples/mnist_generated.py              +185 -0
tools/nni_annotation/examples/mnist_with_annotation.json      +56  -0
tools/nni_annotation/examples/mnist_with_annotation.py        +254 -0
tools/nni_annotation/examples/mnist_without_annotation.json   +56  -0
tools/nni_annotation/examples/mnist_without_annotation.py     +248 -0
tools/nni_annotation/search_space_generator.py                +123 -0
tools/nni_annotation/test_annotation.py                       +60  -0
tools/nni_annotation/testcase/annotated/dir/simple.py         +14  -0
tools/nni_annotation/testcase/annotated/handwrite.py          +13  -0
tools/nni_annotation/testcase/annotated/mnist.py              +171 -0
tools/nni_annotation/testcase/searchspace.json                +54  -0
tools/nni_annotation/testcase/usercode/dir/simple.py          +11  -0
tools/nni_annotation/testcase/usercode/mnist.py               +208 -0
tools/nni_annotation/testcase/usercode/nonpy.txt              +1   -0
tools/nnicmd/config_utils.py                                  +1   -1
tools/nnicmd/launcher.py                                      +41  -31
test/naive/run.py  (+9 -7)

@@ -5,6 +5,7 @@ import json
 import os
 import subprocess
 import time
+import traceback

 GREEN = '\33[32m'
 RED = '\33[31m'
@@ -25,7 +26,7 @@ def run():
     with contextlib.suppress(FileNotFoundError):
         os.remove('tuner_result.txt')
     with contextlib.suppress(FileNotFoundError):
-        os.remove('assessor_result.txt')
+        os.remove('/tmp/nni_assessor_result.txt')

     proc = subprocess.run(['nnictl', 'create', '--config', 'local.yml'])
     assert proc.returncode == 0, '`nnictl create` failed with code %d' % proc.returncode
@@ -36,8 +37,8 @@ def run():
     for _ in range(60):
         time.sleep(1)
-        tuner_status = read_last_line('tuner_result.txt')
-        assessor_status = read_last_line('assessor_result.txt')
+        tuner_status = read_last_line('/tmp/nni_tuner_result.txt')
+        assessor_status = read_last_line('/tmp/nni_assessor_result.txt')
         assert tuner_status != 'ERROR', 'Tuner exited with error'
         assert assessor_status != 'ERROR', 'Assessor exited with error'
@@ -46,7 +47,7 @@ def run():
             break
     if tuner_status is not None:
-        for line in open('tuner_result.txt'):
+        for line in open('/tmp/nni_tuner_result.txt'):
             if line.strip() in ('DONE', 'ERROR'):
                 break
             trial = int(line.split(' ')[0])
@@ -57,16 +58,16 @@ def run():
     assert tuner_status == 'DONE' and assessor_status == 'DONE', 'Failed to finish in 1 min'

     ss1 = json.load(open('search_space.json'))
-    ss2 = json.load(open('tuner_search_space.json'))
+    ss2 = json.load(open('/tmp/nni_tuner_search_space.json'))
     assert ss1 == ss2, 'Tuner got wrong search space'

-    tuner_result = set(open('tuner_result.txt'))
+    tuner_result = set(open('/tmp/nni_tuner_result.txt'))
     expected = set(open('expected_tuner_result.txt'))
     # Trials may complete before NNI gets assessor's result,
     # so it is possible to have more final result than expected
     assert tuner_result.issuperset(expected), 'Bad tuner result'

-    assessor_result = set(open('assessor_result.txt'))
+    assessor_result = set(open('/tmp/nni_assessor_result.txt'))
     expected = set(open('expected_assessor_result.txt'))
     assert assessor_result == expected, 'Bad assessor result'
@@ -78,5 +79,6 @@ if __name__ == '__main__':
     except Exception as e:
         print(RED + 'FAIL' + CLEAR)
         print('%r' % e)
+        traceback.print_exc()
     subprocess.run(['nnictl', 'stop'])
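For context, `read_last_line` is a helper defined elsewhere in this file, outside the hunks shown above. A minimal sketch of what such a helper plausibly looks like — the body below is an assumption for illustration, not part of the diff:

def read_last_line(file_name):
    # Assumed helper: return the stripped last line of the file,
    # or None if the file does not exist or is empty.
    try:
        lines = open(file_name).read().splitlines()
    except FileNotFoundError:
        return None
    return lines[-1].strip() if lines else None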
tools/nni_annotation/README.md  (new file, mode 100644, +55 -0)
# Introduction

To give users a good experience with minimal effort, we need a concise annotation grammar. With NNI, users only need to:

1. Annotate a variable in code as:
   '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''
2. Annotate an intermediate result in code as:
   '''@nni.report_intermediate_result(test_acc)'''
3. Annotate the output in code as:
   '''@nni.report_final_result(test_acc)'''
4. Annotate a `function_choice` in code as:
   '''@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)'''

In this way, users can easily enable automatic tuning on NNI.

For `@nni.variable`, `nni.choice` is one type of search space; there are 10 types in total, expressed as follows:

1. `@nni.variable(nni.choice(option1,option2,...,optionN),name=variable)`
   The variable value is one of the options, which should be given as a list. The elements of options can themselves be stochastic expressions.
2. `@nni.variable(nni.randint(upper),name=variable)`
   The variable value is a random integer in the range [0, upper).
3. `@nni.variable(nni.uniform(low, high),name=variable)`
   The variable value is sampled uniformly between low and high.
4. `@nni.variable(nni.quniform(low, high, q),name=variable)`
   The variable value is round(uniform(low, high) / q) * q.
5. `@nni.variable(nni.loguniform(low, high),name=variable)`
   The variable value is drawn as exp(uniform(low, high)), so that the logarithm of the value is uniformly distributed.
6. `@nni.variable(nni.qloguniform(low, high, q),name=variable)`
   The variable value is round(exp(uniform(low, high)) / q) * q.
7. `@nni.variable(nni.normal(label, mu, sigma),name=variable)`
   The variable value is a real number, normally distributed with mean mu and standard deviation sigma.
8. `@nni.variable(nni.qnormal(label, mu, sigma, q),name=variable)`
   The variable value is round(normal(mu, sigma) / q) * q.
9. `@nni.variable(nni.lognormal(label, mu, sigma),name=variable)`
   The variable value is drawn as exp(normal(mu, sigma)), so that the logarithm of the value is normally distributed.
10. `@nni.variable(nni.qlognormal(label, mu, sigma, q),name=variable)`
    The variable value is round(exp(normal(mu, sigma)) / q) * q.
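As a concrete illustration of how an annotation pairs with the line below it, here is a minimal sketch (the variable name and default value are illustrative; the expanded form matches what the files in this commit generate):

```python
"""@nni.variable(nni.choice(2, 3, 5, 7), name=conv_size)"""
conv_size = 5  # default value, used when the code runs without NNI

# After annotation expansion, the assignment above becomes roughly:
#     conv_size = nni.choice(2, 3, 5, 7, name='conv_size')
```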
tools/nni_annotation/__init__.py  (new file, mode 100644, +104 -0)
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================

import os
import shutil

from . import code_generator
from . import search_space_generator

__all__ = ['generate_search_space', 'expand_annotations']


def generate_search_space(code_dir):
    """Generate search space from Python source code.
    Return a serializable search space object.
    code_dir: directory path of source files (str)
    """
    search_space = {}

    if code_dir.endswith('/'):
        code_dir = code_dir[:-1]

    for subdir, _, files in os.walk(code_dir):
        # generate module name from path
        if subdir == code_dir:
            package = ''
        else:
            assert subdir.startswith(code_dir + '/'), subdir
            prefix_len = len(code_dir) + 1
            package = subdir[prefix_len:].replace('/', '.') + '.'

        for file_name in files:
            if file_name.endswith('.py'):
                path = os.path.join(subdir, file_name)
                module = package + file_name[:-3]
                search_space.update(_generate_file_search_space(path, module))

    return search_space


def _generate_file_search_space(path, module):
    with open(path) as src:
        try:
            return search_space_generator.generate(module, src.read())
        except Exception as exc:  # pylint: disable=broad-except
            if exc.args:
                raise RuntimeError(path + ' ' + '\n'.join(exc.args))
            else:
                raise RuntimeError('Failed to generate search space for %s: %r' % (path, exc))


def expand_annotations(src_dir, dst_dir):
    """Expand annotations in user code.
    src_dir: directory path of user code (str)
    dst_dir: directory to place generated files (str)
    """
    if src_dir[-1] == '/':
        src_dir = src_dir[:-1]
    if dst_dir[-1] == '/':
        dst_dir = dst_dir[:-1]

    for src_subdir, dirs, files in os.walk(src_dir):
        assert src_subdir.startswith(src_dir)
        dst_subdir = src_subdir.replace(src_dir, dst_dir, 1)
        os.makedirs(dst_subdir, exist_ok=True)

        for file_name in files:
            src_path = os.path.join(src_subdir, file_name)
            dst_path = os.path.join(dst_subdir, file_name)
            if file_name.endswith('.py'):
                _expand_file_annotations(src_path, dst_path)
            else:
                shutil.copyfile(src_path, dst_path)

        for dir_name in dirs:
            os.makedirs(os.path.join(dst_subdir, dir_name), exist_ok=True)


def _expand_file_annotations(src_path, dst_path):
    with open(src_path) as src, open(dst_path, 'w') as dst:
        try:
            dst.write(code_generator.parse(src.read()))
        except Exception as exc:  # pylint: disable=broad-except
            if exc.args:
                raise RuntimeError(src_path + ' ' + '\n'.join(exc.args))
            else:
                raise RuntimeError('Failed to expand annotations for %s: %r' % (src_path, exc))
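A minimal usage sketch of these two entry points (the directory names are illustrative, not from the commit):

import json
import nni_annotation

# Expand '@nni.*' annotation strings in user code into real API calls,
# writing the generated files to a separate directory.
nni_annotation.expand_annotations('trial_code', 'trial_code_generated')

# Derive the search space from the expanded code and dump it for the tuner.
search_space = nni_annotation.generate_search_space('trial_code_generated')
print(json.dumps(search_space, indent=4))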
tools/nni_annotation/code_generator.py  (new file, mode 100644, +240 -0)
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================

import ast

import astor

# pylint: disable=unidiomatic-typecheck


def parse_annotation(code):
    """Parse an annotation string.
    Return an AST Expr node.
    code: annotation string (excluding '@')
    """
    module = ast.parse(code)
    assert type(module) is ast.Module, 'internal error #1'
    assert len(module.body) == 1, 'Annotation contains more than one expression'
    assert type(module.body[0]) is ast.Expr, 'Annotation is not expression'
    return module.body[0]


def parse_annotation_function(code, func_name):
    """Parse an annotation function.
    Return the value of `name` keyword argument and the AST Call node.
    func_name: expected function name
    """
    expr = parse_annotation(code)
    call = expr.value
    assert type(call) is ast.Call, 'Annotation is not a function call'

    assert type(call.func) is ast.Attribute, 'Unexpected annotation function'
    assert type(call.func.value) is ast.Name, 'Invalid annotation function name'
    assert call.func.value.id == 'nni', 'Annotation is not a NNI function'
    assert call.func.attr == func_name, 'internal error #2'

    assert len(call.keywords) == 1, 'Annotation function contains more than one keyword argument'
    assert call.keywords[0].arg == 'name', 'Annotation keyword argument is not "name"'
    name = call.keywords[0].value

    return name, call


def parse_nni_variable(code):
    """Parse `nni.variable` expression.
    Return the name argument and AST node of annotated expression.
    code: annotation string
    """
    name, call = parse_annotation_function(code, 'variable')

    assert len(call.args) == 1, 'nni.variable contains more than one arguments'
    arg = call.args[0]
    assert type(arg) is ast.Call, 'Value of nni.variable is not a function call'
    assert type(arg.func) is ast.Attribute, 'nni.variable value is not a NNI function'
    assert type(arg.func.value) is ast.Name, 'nni.variable value is not a NNI function'
    assert arg.func.value.id == 'nni', 'nni.variable value is not a NNI function'

    name_str = astor.to_source(name).strip()
    keyword_arg = ast.keyword(arg='name', value=ast.Str(s=name_str))
    arg.keywords.append(keyword_arg)

    return name, arg


def parse_nni_function(code):
    """Parse `nni.function_choice` expression.
    Return the AST node of annotated expression and a list of dumped function call expressions.
    code: annotation string
    """
    name, call = parse_annotation_function(code, 'function_choice')
    funcs = [ast.dump(func, False) for func in call.args]
    call.args = [make_lambda(arg) for arg in call.args]

    name_str = astor.to_source(name).strip()
    call.keywords[0].value = ast.Str(s=name_str)

    return call, funcs


def make_lambda(call):
    """Wrap an AST Call node to lambda expression node.
    call: ast.Call node
    """
    assert type(call) is ast.Call, 'Argument of nni.function_choice is not function call'
    empty_args = ast.arguments(args=[], vararg=None, kwarg=None, defaults=[])
    return ast.Lambda(args=empty_args, body=call)


def test_variable_equal(var1, var2):
    """Test whether two variables are the same."""
    if type(var1) is not type(var2):
        return False
    if type(var1) is ast.Name:
        return var1.id == var2.id
    if type(var1) is ast.Attribute:
        return var1.attr == var2.attr and test_variable_equal(var1.value, var2.value)
    return False


def replace_variable_node(node, annotation):
    """Replace a node annotated by `nni.variable`.
    node: the AST node to replace
    annotation: annotation string
    """
    assert type(node) is ast.Assign, 'nni.variable is not annotating assignment expression'
    assert len(node.targets) == 1, 'Annotated assignment has more than one left-hand value'
    name, expr = parse_nni_variable(annotation)
    assert test_variable_equal(node.targets[0], name), 'Annotated variable has wrong name'
    node.value = expr
    return node


def replace_function_node(node, annotation):
    """Replace a node annotated by `nni.function_choice`.
    node: the AST node to replace
    annotation: annotation string
    """
    target, funcs = parse_nni_function(annotation)
    FuncReplacer(funcs, target).visit(node)
    return node


class FuncReplacer(ast.NodeTransformer):
    """To replace target function call expressions in a node annotated by `nni.function_choice`"""

    def __init__(self, funcs, target):
        """Constructor.
        funcs: list of dumped function call expressions to replace
        target: use this AST node to replace matching expressions
        """
        self.funcs = set(funcs)
        self.target = target

    def visit_Call(self, node):  # pylint: disable=invalid-name
        if ast.dump(node, False) in self.funcs:
            return self.target
        return node


class Transformer(ast.NodeTransformer):
    """Transform original code to annotated code"""

    def __init__(self):
        self.stack = []
        self.last_line = 0

    def visit(self, node):
        if isinstance(node, (ast.expr, ast.stmt)):
            self.last_line = node.lineno

        # do nothing for root
        if not self.stack:
            return self._visit_children(node)

        annotation = self.stack[-1]

        # this is a standalone string, may be an annotation
        if type(node) is ast.Expr and type(node.value) is ast.Str:
            # must not annotate an annotation string
            assert annotation is None, 'Annotating an annotation'
            return self._visit_string(node)

        if annotation is not None:  # this expression is annotated
            self.stack[-1] = None  # so next expression is not
            if annotation.startswith('nni.variable'):
                return replace_variable_node(node, annotation)
            if annotation.startswith('nni.function_choice'):
                return replace_function_node(node, annotation)

        return self._visit_children(node)

    def _visit_string(self, node):
        string = node.value.s
        if not string.startswith('@nni.'):
            return node  # not an annotation, ignore it

        if string.startswith('@nni.report_intermediate_result(') \
                or string.startswith('@nni.report_final_result('):
            return parse_annotation(string[1:])  # expand annotation string to code

        if string.startswith('@nni.variable(') \
                or string.startswith('@nni.function_choice('):
            self.stack[-1] = string[1:]  # mark that the next expression is annotated
            return None

        raise AssertionError('Unexpected annotation function')

    def _visit_children(self, node):
        self.stack.append(None)
        self.generic_visit(node)
        annotation = self.stack.pop()
        assert annotation is None, 'Annotation has no target'
        return node


def parse(code):
    """Annotate user code.
    Return annotated code (str).
    code: original user code (str)
    """
    try:
        ast_tree = ast.parse(code)
    except Exception:
        raise RuntimeError('Bad Python code')

    # keep a reference to the transformer so its `last_line` is
    # available for error reporting
    transformer = Transformer()
    try:
        transformer.visit(ast_tree)
    except AssertionError as exc:
        raise RuntimeError('%d: %s' % (transformer.last_line, exc.args[0]))

    last_future_import = -1
    import_nni = ast.Import(names=[ast.alias(name='nni', asname=None)])
    nodes = ast_tree.body
    for i, _ in enumerate(nodes):
        if type(nodes[i]) is ast.ImportFrom and nodes[i].module == '__future__':
            last_future_import = i
    nodes.insert(last_future_import + 1, import_nni)

    return astor.to_source(ast_tree)
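A small sketch of what `parse` produces for a one-annotation snippet (the variable name and values are illustrative):

annotated_source = (
    '"""@nni.variable(nni.choice(32, 64, 128), name=batch_size)"""\n'
    'batch_size = 64\n'
)
print(parse(annotated_source))
# Expected output, roughly:
#     import nni
#     batch_size = nni.choice(32, 64, 128, name='batch_size')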
tools/nni_annotation/examples/mnist_generated.py  (new file, mode 100644, +185 -0)
import nni
"""A deep MNIST classifier using convolutional layers."""
import logging
import math
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
logger = logging.getLogger('mnist_AutoML')


class MnistNetwork(object):
    """
    MnistNetwork is for initlizing and building basic network for mnist.
    """

    def __init__(self, channel_1_num, channel_2_num, conv_size, hidden_size,
                 pool_size, learning_rate, x_dim=784, y_dim=10):
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        self.conv_size = nni.choice(2, 3, 5, 7, name='self.conv_size')
        self.hidden_size = nni.choice(124, 512, 1024, name='self.hidden_size')
        self.pool_size = pool_size
        self.learning_rate = nni.uniform(0.0001, 0.1, name='self.learning_rate')
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.train_step = None
        self.accuracy = None

    def build_network(self):
        """
        Building network for mnist
        """
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug('input dim cannot be sqrt and reshape. input dim: %s', str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            h_conv1 = nni.function_choice(
                lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1),
                name='tf.nn.relu')
        with tf.name_scope('pool1'):
            h_pool1 = nni.function_choice(
                lambda: max_pool(h_conv1, self.pool_size),
                lambda: avg_pool(h_conv1, self.pool_size),
                name='max_pool')
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([self.conv_size, self.conv_size,
                                       self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)
        with tf.name_scope('pool2'):
            h_pool2 = max_pool(h_conv2, self.pool_size)
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2
        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


def conv2d(x_input, w_matrix):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')


def max_pool(x_input, pool_size):
    """max_pool downsamples a feature map by 2X."""
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def avg_pool(x_input, pool_size):
    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def main(params):
    """
    Main function, build mnist network, run and send result to NNI.
    """
    mnist = input_data.read_data_sets(params['data_dir'], one_hot=True)
    print('Mnist download data down.')
    logger.debug('Mnist download data down.')
    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
                                 channel_2_num=params['channel_2_num'],
                                 conv_size=params['conv_size'],
                                 hidden_size=params['hidden_size'],
                                 pool_size=params['pool_size'],
                                 learning_rate=params['learning_rate'])
    mnist_network.build_network()
    logger.debug('Mnist build network done.')
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())
    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_num = nni.choice(50, 250, 500, name='batch_num')
        for i in range(batch_num):
            batch = mnist.train.next_batch(batch_num)
            dropout_rate = nni.choice(1, 5, name='dropout_rate')
            mnist_network.train_step.run(feed_dict={
                mnist_network.images: batch[0],
                mnist_network.labels: batch[1],
                mnist_network.keep_prob: dropout_rate})
            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(feed_dict={
                    mnist_network.images: mnist.test.images,
                    mnist_network.labels: mnist.test.labels,
                    mnist_network.keep_prob: 1.0})
                nni.report_intermediate_result(test_acc)
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')
        test_acc = mnist_network.accuracy.eval(feed_dict={
            mnist_network.images: mnist.test.images,
            mnist_network.labels: mnist.test.labels,
            mnist_network.keep_prob: 1.0})
        nni.report_final_result(test_acc)
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')


def generate_defualt_params():
    """
    Generate default parameters for mnist network.
    """
    params = {'data_dir': '/tmp/tensorflow/mnist/input_data',
              'dropout_rate': 0.5,
              'channel_1_num': 32,
              'channel_2_num': 64,
              'conv_size': 5,
              'pool_size': 2,
              'hidden_size': 1024,
              'learning_rate': 0.0001,
              'batch_num': 200}
    return params


if __name__ == '__main__':
    try:
        main(generate_defualt_params())
    except Exception as exception:
        logger.exception(exception)
        raise
tools/nni_annotation/examples/mnist_with_annotation.json  (new file, mode 100644, +56 -0)
{
    "mnist_with_annotation/batch_num/choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    },
    "mnist_with_annotation/dropout_rate/choice": {
        "_type": "choice",
        "_value": [0, 1]
    },
    "mnist_with_annotation/max_pool/function_choice": {
        "_type": "choice",
        "_value": [0, 1]
    },
    "mnist_with_annotation/self.conv_size/choice": {
        "_type": "choice",
        "_value": [0, 1, 2, 3]
    },
    "mnist_with_annotation/self.hidden_size/choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    },
    "mnist_with_annotation/self.learning_rate/uniform": {
        "_type": "uniform",
        "_value": [0.0001, 0.1]
    },
    "mnist_with_annotation/tf.nn.relu/function_choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    }
}
\ No newline at end of file
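Note that for `choice` and `function_choice` entries, `_value` holds indices into the annotated option list rather than the options themselves, since options may be arbitrary expressions. A minimal sketch of decoding a sampled index (names are illustrative):

options = [2, 3, 5, 7]        # from @nni.variable(nni.choice(2, 3, 5, 7), name=self.conv_size)
sampled = 2                   # a tuner picks an index out of "_value": [0, 1, 2, 3]
conv_size = options[sampled]  # -> 5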
tools/nni_annotation/examples/mnist_with_annotation.py  (new file, mode 100644, +254 -0)
#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A deep MNIST classifier using convolutional layers."""
import
logging
import
math
import
tempfile
import
tensorflow
as
tf
from
tensorflow.examples.tutorials.mnist
import
input_data
FLAGS
=
None
logger
=
logging
.
getLogger
(
'mnist_AutoML'
)
class
MnistNetwork
(
object
):
'''
MnistNetwork is for initlizing and building basic network for mnist.
'''
def
__init__
(
self
,
channel_1_num
,
channel_2_num
,
conv_size
,
hidden_size
,
pool_size
,
learning_rate
,
x_dim
=
784
,
y_dim
=
10
):
self
.
channel_1_num
=
channel_1_num
self
.
channel_2_num
=
channel_2_num
"""@nni.variable(nni.choice(2, 3, 5, 7),name=self.conv_size)"""
self
.
conv_size
=
conv_size
"""@nni.variable(nni.choice(124, 512, 1024), name=self.hidden_size)"""
self
.
hidden_size
=
hidden_size
self
.
pool_size
=
pool_size
"""@nni.variable(nni.uniform(0.0001, 0.1), name=self.learning_rate)"""
self
.
learning_rate
=
learning_rate
self
.
x_dim
=
x_dim
self
.
y_dim
=
y_dim
self
.
images
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
x_dim
],
name
=
'input_x'
)
self
.
labels
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
y_dim
],
name
=
'input_y'
)
self
.
keep_prob
=
tf
.
placeholder
(
tf
.
float32
,
name
=
'keep_prob'
)
self
.
train_step
=
None
self
.
accuracy
=
None
def
build_network
(
self
):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with
tf
.
name_scope
(
'reshape'
):
try
:
input_dim
=
int
(
math
.
sqrt
(
self
.
x_dim
))
except
:
print
(
'input dim cannot be sqrt and reshape. input dim: '
+
str
(
self
.
x_dim
))
logger
.
debug
(
'input dim cannot be sqrt and reshape. input dim: %s'
,
str
(
self
.
x_dim
))
raise
x_image
=
tf
.
reshape
(
self
.
images
,
[
-
1
,
input_dim
,
input_dim
,
1
])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with
tf
.
name_scope
(
'conv1'
):
w_conv1
=
weight_variable
(
[
self
.
conv_size
,
self
.
conv_size
,
1
,
self
.
channel_1_num
])
b_conv1
=
bias_variable
([
self
.
channel_1_num
])
"""@nni.function_choice(tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1), tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1), tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1), name=tf.nn.relu)"""
h_conv1
=
tf
.
nn
.
relu
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
)
# Pooling layer - downsamples by 2X.
with
tf
.
name_scope
(
'pool1'
):
"""@nni.function_choice(max_pool(h_conv1, self.pool_size), avg_pool(h_conv1, self.pool_size), name=max_pool)"""
h_pool1
=
max_pool
(
h_conv1
,
self
.
pool_size
)
# Second convolutional layer -- maps 32 feature maps to 64.
with
tf
.
name_scope
(
'conv2'
):
w_conv2
=
weight_variable
([
self
.
conv_size
,
self
.
conv_size
,
self
.
channel_1_num
,
self
.
channel_2_num
])
b_conv2
=
bias_variable
([
self
.
channel_2_num
])
h_conv2
=
tf
.
nn
.
relu
(
conv2d
(
h_pool1
,
w_conv2
)
+
b_conv2
)
# Second pooling layer.
with
tf
.
name_scope
(
'pool2'
):
h_pool2
=
max_pool
(
h_conv2
,
self
.
pool_size
)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim
=
int
(
input_dim
/
(
self
.
pool_size
*
self
.
pool_size
))
with
tf
.
name_scope
(
'fc1'
):
w_fc1
=
weight_variable
(
[
last_dim
*
last_dim
*
self
.
channel_2_num
,
self
.
hidden_size
])
b_fc1
=
bias_variable
([
self
.
hidden_size
])
h_pool2_flat
=
tf
.
reshape
(
h_pool2
,
[
-
1
,
last_dim
*
last_dim
*
self
.
channel_2_num
])
h_fc1
=
tf
.
nn
.
relu
(
tf
.
matmul
(
h_pool2_flat
,
w_fc1
)
+
b_fc1
)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with
tf
.
name_scope
(
'dropout'
):
h_fc1_drop
=
tf
.
nn
.
dropout
(
h_fc1
,
self
.
keep_prob
)
# Map the 1024 features to 10 classes, one for each digit
with
tf
.
name_scope
(
'fc2'
):
w_fc2
=
weight_variable
([
self
.
hidden_size
,
self
.
y_dim
])
b_fc2
=
bias_variable
([
self
.
y_dim
])
y_conv
=
tf
.
matmul
(
h_fc1_drop
,
w_fc2
)
+
b_fc2
with
tf
.
name_scope
(
'loss'
):
cross_entropy
=
tf
.
reduce_mean
(
tf
.
nn
.
softmax_cross_entropy_with_logits
(
labels
=
self
.
labels
,
logits
=
y_conv
))
with
tf
.
name_scope
(
'adam_optimizer'
):
self
.
train_step
=
tf
.
train
.
AdamOptimizer
(
self
.
learning_rate
).
minimize
(
cross_entropy
)
with
tf
.
name_scope
(
'accuracy'
):
correct_prediction
=
tf
.
equal
(
tf
.
argmax
(
y_conv
,
1
),
tf
.
argmax
(
self
.
labels
,
1
))
self
.
accuracy
=
tf
.
reduce_mean
(
tf
.
cast
(
correct_prediction
,
tf
.
float32
))
def
conv2d
(
x_input
,
w_matrix
):
"""conv2d returns a 2d convolution layer with full stride."""
return
tf
.
nn
.
conv2d
(
x_input
,
w_matrix
,
strides
=
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
def
max_pool
(
x_input
,
pool_size
):
"""max_pool downsamples a feature map by 2X."""
return
tf
.
nn
.
max_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
avg_pool
(
x_input
,
pool_size
):
return
tf
.
nn
.
avg_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
weight_variable
(
shape
):
"""weight_variable generates a weight variable of a given shape."""
initial
=
tf
.
truncated_normal
(
shape
,
stddev
=
0.1
)
return
tf
.
Variable
(
initial
)
def
bias_variable
(
shape
):
"""bias_variable generates a bias variable of a given shape."""
initial
=
tf
.
constant
(
0.1
,
shape
=
shape
)
return
tf
.
Variable
(
initial
)
def
main
(
params
):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist
=
input_data
.
read_data_sets
(
params
[
'data_dir'
],
one_hot
=
True
)
print
(
'Mnist download data down.'
)
logger
.
debug
(
'Mnist download data down.'
)
# Create the model
# Build the graph for the deep net
mnist_network
=
MnistNetwork
(
channel_1_num
=
params
[
'channel_1_num'
],
channel_2_num
=
params
[
'channel_2_num'
],
conv_size
=
params
[
'conv_size'
],
hidden_size
=
params
[
'hidden_size'
],
pool_size
=
params
[
'pool_size'
],
learning_rate
=
params
[
'learning_rate'
])
mnist_network
.
build_network
()
logger
.
debug
(
'Mnist build network done.'
)
# Write log
graph_location
=
tempfile
.
mkdtemp
()
logger
.
debug
(
'Saving graph to: %s'
,
graph_location
)
train_writer
=
tf
.
summary
.
FileWriter
(
graph_location
)
train_writer
.
add_graph
(
tf
.
get_default_graph
())
test_acc
=
0.0
with
tf
.
Session
()
as
sess
:
sess
.
run
(
tf
.
global_variables_initializer
())
"""@nni.variable(nni.choice(50, 250, 500), name=batch_num)"""
batch_num
=
params
[
'batch_num'
]
for
i
in
range
(
batch_num
):
batch
=
mnist
.
train
.
next_batch
(
batch_num
)
"""@nni.variable(nni.choice(1, 5), name=dropout_rate)"""
dropout_rate
=
params
[
'dropout_rate'
]
mnist_network
.
train_step
.
run
(
feed_dict
=
{
mnist_network
.
images
:
batch
[
0
],
mnist_network
.
labels
:
batch
[
1
],
mnist_network
.
keep_prob
:
dropout_rate
}
)
if
i
%
100
==
0
:
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
"""@nni.report_intermediate_result(test_acc)"""
logger
.
debug
(
'test accuracy %g'
,
test_acc
)
logger
.
debug
(
'Pipe send intermediate result done.'
)
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
"""@nni.report_final_result(test_acc)"""
logger
.
debug
(
'Final result is %g'
,
test_acc
)
logger
.
debug
(
'Send final result done.'
)
def
generate_defualt_params
():
'''
Generate default parameters for mnist network.
'''
params
=
{
'data_dir'
:
'/tmp/tensorflow/mnist/input_data'
,
'dropout_rate'
:
0.5
,
'channel_1_num'
:
32
,
'channel_2_num'
:
64
,
'conv_size'
:
5
,
'pool_size'
:
2
,
'hidden_size'
:
1024
,
'learning_rate'
:
1e-4
,
'batch_num'
:
200
}
return
params
if
__name__
==
'__main__'
:
try
:
main
(
generate_defualt_params
())
except
Exception
as
exception
:
logger
.
exception
(
exception
)
raise
tools/nni_annotation/examples/mnist_without_annotation.json  (new file, mode 100644, +56 -0)
{
    "mnist_without_annotation/#31/choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    },
    "mnist_without_annotation/#68/function_choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    },
    "mnist_without_annotation/batch_num/choice": {
        "_type": "choice",
        "_value": [0, 1, 2]
    },
    "mnist_without_annotation/conv-size/choice": {
        "_type": "choice",
        "_value": [0, 1, 2, 3]
    },
    "mnist_without_annotation/dropout_rate/choice": {
        "_type": "choice",
        "_value": [0, 1]
    },
    "mnist_without_annotation/h_pool1/function_choice": {
        "_type": "choice",
        "_value": [0, 1]
    },
    "mnist_without_annotation/learning_rate/uniform": {
        "_type": "uniform",
        "_value": [0.0001, 0.1]
    }
}
\ No newline at end of file
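The `#31` and `#68` keys correspond to smart parameters written without a `name` argument: the generator falls back to `'#' + str(lineno)` of the last argument (see search_space_generator.py below). For example, roughly:

# around line 31 of mnist_without_annotation.py (no name given):
self.hidden_size = nni.choice(124, 512, 1024)
# -> search-space key 'mnist_without_annotation/#31/choice'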
tools/nni_annotation/examples/mnist_without_annotation.py  (new file, mode 100644, +248 -0)
#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""A deep MNIST classifier using convolutional layers."""
import
logging
import
math
import
tempfile
import
tensorflow
as
tf
from
tensorflow.examples.tutorials.mnist
import
input_data
import
nni
FLAGS
=
None
logger
=
logging
.
getLogger
(
'mnist_AutoML'
)
class
MnistNetwork
(
object
):
'''
MnistNetwork is for initlizing and building basic network for mnist.
'''
def
__init__
(
self
,
channel_1_num
,
channel_2_num
,
pool_size
,
learning_rate
,
x_dim
=
784
,
y_dim
=
10
):
self
.
channel_1_num
=
channel_1_num
self
.
channel_2_num
=
channel_2_num
self
.
conv_size
=
nni
.
choice
(
2
,
3
,
5
,
7
,
name
=
'conv-size'
)
self
.
hidden_size
=
nni
.
choice
(
124
,
512
,
1024
)
# example: without name
self
.
pool_size
=
pool_size
self
.
learning_rate
=
nni
.
uniform
(
0.0001
,
0.1
,
name
=
'learning_rate'
)
self
.
x_dim
=
x_dim
self
.
y_dim
=
y_dim
self
.
images
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
x_dim
],
name
=
'input_x'
)
self
.
labels
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
y_dim
],
name
=
'input_y'
)
self
.
keep_prob
=
tf
.
placeholder
(
tf
.
float32
,
name
=
'keep_prob'
)
self
.
train_step
=
None
self
.
accuracy
=
None
def
build_network
(
self
):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with
tf
.
name_scope
(
'reshape'
):
try
:
input_dim
=
int
(
math
.
sqrt
(
self
.
x_dim
))
except
:
print
(
'input dim cannot be sqrt and reshape. input dim: '
+
str
(
self
.
x_dim
))
logger
.
debug
(
'input dim cannot be sqrt and reshape. input dim: %s'
,
str
(
self
.
x_dim
))
raise
x_image
=
tf
.
reshape
(
self
.
images
,
[
-
1
,
input_dim
,
input_dim
,
1
])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with
tf
.
name_scope
(
'conv1'
):
w_conv1
=
weight_variable
(
[
self
.
conv_size
,
self
.
conv_size
,
1
,
self
.
channel_1_num
])
b_conv1
=
bias_variable
([
self
.
channel_1_num
])
h_conv1
=
nni
.
function_choice
(
lambda
:
tf
.
nn
.
relu
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
sigmoid
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
tanh
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
)
)
# example: without name
# Pooling layer - downsamples by 2X.
with
tf
.
name_scope
(
'pool1'
):
h_pool1
=
max_pool
(
h_conv1
,
self
.
pool_size
)
h_pool1
=
nni
.
function_choice
(
lambda
:
max_pool
(
h_conv1
,
self
.
pool_size
),
lambda
:
avg_pool
(
h_conv1
,
self
.
pool_size
),
name
=
'h_pool1'
)
# Second convolutional layer -- maps 32 feature maps to 64.
with
tf
.
name_scope
(
'conv2'
):
w_conv2
=
weight_variable
([
self
.
conv_size
,
self
.
conv_size
,
self
.
channel_1_num
,
self
.
channel_2_num
])
b_conv2
=
bias_variable
([
self
.
channel_2_num
])
h_conv2
=
tf
.
nn
.
relu
(
conv2d
(
h_pool1
,
w_conv2
)
+
b_conv2
)
# Second pooling layer.
with
tf
.
name_scope
(
'pool2'
):
# example: another style
h_pool2
=
max_pool
(
h_conv2
,
self
.
pool_size
)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim
=
int
(
input_dim
/
(
self
.
pool_size
*
self
.
pool_size
))
with
tf
.
name_scope
(
'fc1'
):
w_fc1
=
weight_variable
(
[
last_dim
*
last_dim
*
self
.
channel_2_num
,
self
.
hidden_size
])
b_fc1
=
bias_variable
([
self
.
hidden_size
])
h_pool2_flat
=
tf
.
reshape
(
h_pool2
,
[
-
1
,
last_dim
*
last_dim
*
self
.
channel_2_num
])
h_fc1
=
tf
.
nn
.
relu
(
tf
.
matmul
(
h_pool2_flat
,
w_fc1
)
+
b_fc1
)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with
tf
.
name_scope
(
'dropout'
):
h_fc1_drop
=
tf
.
nn
.
dropout
(
h_fc1
,
self
.
keep_prob
)
# Map the 1024 features to 10 classes, one for each digit
with
tf
.
name_scope
(
'fc2'
):
w_fc2
=
weight_variable
([
self
.
hidden_size
,
self
.
y_dim
])
b_fc2
=
bias_variable
([
self
.
y_dim
])
y_conv
=
tf
.
matmul
(
h_fc1_drop
,
w_fc2
)
+
b_fc2
with
tf
.
name_scope
(
'loss'
):
cross_entropy
=
tf
.
reduce_mean
(
tf
.
nn
.
softmax_cross_entropy_with_logits
(
labels
=
self
.
labels
,
logits
=
y_conv
))
with
tf
.
name_scope
(
'adam_optimizer'
):
self
.
train_step
=
tf
.
train
.
AdamOptimizer
(
self
.
learning_rate
).
minimize
(
cross_entropy
)
with
tf
.
name_scope
(
'accuracy'
):
correct_prediction
=
tf
.
equal
(
tf
.
argmax
(
y_conv
,
1
),
tf
.
argmax
(
self
.
labels
,
1
))
self
.
accuracy
=
tf
.
reduce_mean
(
tf
.
cast
(
correct_prediction
,
tf
.
float32
))
def
conv2d
(
x_input
,
w_matrix
):
"""conv2d returns a 2d convolution layer with full stride."""
return
tf
.
nn
.
conv2d
(
x_input
,
w_matrix
,
strides
=
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
def
max_pool
(
x_input
,
pool_size
):
"""max_pool downsamples a feature map by 2X."""
return
tf
.
nn
.
max_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
avg_pool
(
x_input
,
pool_size
):
return
tf
.
nn
.
avg_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
weight_variable
(
shape
):
"""weight_variable generates a weight variable of a given shape."""
initial
=
tf
.
truncated_normal
(
shape
,
stddev
=
0.1
)
return
tf
.
Variable
(
initial
)
def
bias_variable
(
shape
):
"""bias_variable generates a bias variable of a given shape."""
initial
=
tf
.
constant
(
0.1
,
shape
=
shape
)
return
tf
.
Variable
(
initial
)
def
main
(
params
):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist
=
input_data
.
read_data_sets
(
params
[
'data_dir'
],
one_hot
=
True
)
print
(
'Mnist download data down.'
)
logger
.
debug
(
'Mnist download data down.'
)
# Create the model
# Build the graph for the deep net
mnist_network
=
MnistNetwork
(
channel_1_num
=
params
[
'channel_1_num'
],
channel_2_num
=
params
[
'channel_2_num'
],
pool_size
=
params
[
'pool_size'
])
mnist_network
.
build_network
()
logger
.
debug
(
'Mnist build network done.'
)
# Write log
graph_location
=
tempfile
.
mkdtemp
()
logger
.
debug
(
'Saving graph to: %s'
,
graph_location
)
train_writer
=
tf
.
summary
.
FileWriter
(
graph_location
)
train_writer
.
add_graph
(
tf
.
get_default_graph
())
test_acc
=
0.0
with
tf
.
Session
()
as
sess
:
sess
.
run
(
tf
.
global_variables_initializer
())
batch_num
=
nni
.
choice
(
50
,
250
,
500
,
name
=
'batch_num'
)
for
i
in
range
(
batch_num
):
batch
=
mnist
.
train
.
next_batch
(
batch_num
)
dropout_rate
=
nni
.
choice
(
1
,
5
,
name
=
'dropout_rate'
)
mnist_network
.
train_step
.
run
(
feed_dict
=
{
mnist_network
.
images
:
batch
[
0
],
mnist_network
.
labels
:
batch
[
1
],
mnist_network
.
keep_prob
:
dropout_rate
}
)
if
i
%
100
==
0
:
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_intermediate_result
(
test_acc
)
logger
.
debug
(
'test accuracy %g'
,
test_acc
)
logger
.
debug
(
'Pipe send intermediate result done.'
)
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_final_result
(
test_acc
)
logger
.
debug
(
'Final result is %g'
,
test_acc
)
logger
.
debug
(
'Send final result done.'
)
def
generate_defualt_params
():
'''
Generate default parameters for mnist network.
'''
params
=
{
'data_dir'
:
'/tmp/tensorflow/mnist/input_data'
,
'channel_1_num'
:
32
,
'channel_2_num'
:
64
,
'pool_size'
:
2
}
return
params
if
__name__
==
'__main__'
:
try
:
main
(
generate_defualt_params
())
except
Exception
as
exception
:
logger
.
exception
(
exception
)
raise
tools/nni_annotation/search_space_generator.py  (new file, mode 100644, +123 -0)
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================

import ast

# pylint: disable=unidiomatic-typecheck


# list of functions related to search space generating
_ss_funcs = [
    'choice',
    'randint',
    'uniform',
    'quniform',
    'loguniform',
    'qloguniform',
    'normal',
    'qnormal',
    'lognormal',
    'qlognormal',
    'function_choice'
]


class SearchSpaceGenerator(ast.NodeVisitor):
    """Generate search space from smart parameter APIs"""

    def __init__(self, module_name):
        self.module_name = module_name
        self.search_space = {}
        self.last_line = 0  # last parsed line, useful for error reporting

    def visit_Call(self, node):  # pylint: disable=invalid-name
        self.generic_visit(node)

        # ignore if the function is not 'nni.*'
        if type(node.func) is not ast.Attribute:
            return
        if type(node.func.value) is not ast.Name:
            return
        if node.func.value.id != 'nni':
            return

        # ignore if it's not a search space function (e.g. `report_final_result`)
        func = node.func.attr
        if func not in _ss_funcs:
            return

        self.last_line = node.lineno

        if node.keywords:
            # there is a `name` argument
            assert len(node.keywords) == 1, 'Smart parameter has keyword argument other than "name"'
            assert node.keywords[0].arg == 'name', 'Smart parameter\'s keyword argument is not "name"'
            assert type(node.keywords[0].value) is ast.Str, 'Smart parameter\'s name must be string literal'
            name = node.keywords[0].value.s
            specified_name = True
        else:
            # generate the missing name automatically
            assert len(node.args) > 0, 'Smart parameter expression has no argument'
            name = '#' + str(node.args[-1].lineno)
            specified_name = False

        if func in ('choice', 'function_choice'):
            # arguments of `choice` may contain complex expressions,
            # so use indices instead of arguments
            args = list(range(len(node.args)))
        else:
            # arguments of other functions must be literal numbers
            assert all(type(arg) is ast.Num for arg in node.args), 'Smart parameter\'s arguments must be number literals'
            args = [arg.n for arg in node.args]

        key = self.module_name + '/' + name + '/' + func
        if func == 'function_choice':
            func = 'choice'
        value = {'_type': func, '_value': args}

        if specified_name:
            # multiple functions with the same name must have identical arguments
            old = self.search_space.get(key)
            assert old is None or old == value, 'Different smart parameters have same name'
        else:
            # generated names must not duplicate
            assert key not in self.search_space, 'Only one smart parameter is allowed in a line'

        self.search_space[key] = value


def generate(module_name, code):
    """Generate search space.
    Return a serializable search space object.
    module_name: name of the module (str)
    code: user code (str)
    """
    try:
        ast_tree = ast.parse(code)
    except Exception:
        raise RuntimeError('Bad Python code')

    visitor = SearchSpaceGenerator(module_name)
    try:
        visitor.visit(ast_tree)
    except AssertionError as exc:
        raise RuntimeError('%d: %s' % (visitor.last_line, exc.args[0]))
    return visitor.search_space
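A small sketch of what `generate` returns for a snippet of expanded code (the module name and snippet are illustrative):

code = '''
import nni
conv_size = nni.choice(2, 3, 5, 7, name='conv_size')
lr = nni.uniform(0.0001, 0.1, name='lr')
hidden = nni.choice(124, 512, 1024)   # unnamed: key becomes '#<lineno>'
'''
print(generate('mymodule', code))
# Roughly:
# {'mymodule/conv_size/choice': {'_type': 'choice', '_value': [0, 1, 2, 3]},
#  'mymodule/lr/uniform': {'_type': 'uniform', '_value': [0.0001, 0.1]},
#  'mymodule/#5/choice': {'_type': 'choice', '_value': [0, 1, 2]}}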
tools/nni_annotation/test_annotation.py  (new file, mode 100644, +60 -0)
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ==================================================================================================
# pylint: skip-file
from .__init__ import *

import ast
import json
import os
import shutil
from unittest import TestCase, main


class AnnotationTestCase(TestCase):

    @classmethod
    def setUpClass(cls):
        os.chdir('nni_annotation')
        if os.path.isdir('_generated'):
            shutil.rmtree('_generated')

    def test_search_space_generator(self):
        search_space = generate_search_space('testcase/annotated')
        with open('testcase/searchspace.json') as f:
            self.assertEqual(search_space, json.load(f))

    def test_code_generator(self):
        expand_annotations('testcase/usercode', '_generated')
        self._assert_source_equal('testcase/annotated/mnist.py', '_generated/mnist.py')
        self._assert_source_equal('testcase/annotated/dir/simple.py', '_generated/dir/simple.py')
        with open('testcase/usercode/nonpy.txt') as src, open('_generated/nonpy.txt') as dst:
            assert src.read() == dst.read()

    def _assert_source_equal(self, src1, src2):
        with open(src1) as f1, open(src2) as f2:
            ast1 = ast.dump(ast.parse(f1.read()))
            ast2 = ast.dump(ast.parse(f2.read()))
        self.assertEqual(ast1, ast2)


if __name__ == '__main__':
    main()
tools/nni_annotation/testcase/annotated/dir/simple.py  (new file, mode 100644, +14 -0)
import nni


def max_pool(k):
    pass


h_conv1 = 1
conv_size = nni.choice(2, 3, 5, 7, name='conv_size')
h_pool1 = nni.function_choice(lambda: max_pool(h_conv1),
                              lambda: avg_pool(h_conv2, h_conv3),
                              name='max_pool')
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
tools/nni_annotation/testcase/annotated/handwrite.py  (new file, mode 100644, +13 -0)
h_conv1 = 1
conv_size = nni.choice(2, 3, 5, 7, name='conv_size')
h_pool1 = nni.function_choice(lambda: max_pool(h_conv1),
                              lambda: h_conv1,
                              lambda: avg_pool(h_conv2, h_conv3))
tmp = nni.qlognormal(1.2, 3, 4.5)
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
nni.report_final_result(test_acc)
nni.choice(foo, bar)(1)
tools/nni_annotation/testcase/annotated/mnist.py  (new file, mode 100644, +171 -0)
"""A deep MNIST classifier using convolutional layers.
"""
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
nni
import
logging
import
math
import
tempfile
import
tensorflow
as
tf
from
tensorflow.examples.tutorials.mnist
import
input_data
logger
=
logging
.
getLogger
(
'mnist'
)
FLAGS
=
None
class
MnistNetwork
(
object
):
def
__init__
(
self
,
channel_1_num
=
32
,
channel_2_num
=
64
,
conv_size
=
5
,
hidden_size
=
1024
,
pool_size
=
2
,
learning_rate
=
0.0001
,
x_dim
=
784
,
y_dim
=
10
):
self
.
channel_1_num
=
channel_1_num
self
.
channel_2_num
=
channel_2_num
self
.
conv_size
=
nni
.
choice
(
2
,
3
,
5
,
7
,
name
=
'self.conv_size'
)
self
.
hidden_size
=
nni
.
choice
(
124
,
512
,
1024
,
name
=
'self.hidden_size'
)
self
.
pool_size
=
pool_size
self
.
learning_rate
=
nni
.
randint
(
2
,
3
,
5
,
name
=
'self.learning_rate'
)
self
.
x_dim
=
x_dim
self
.
y_dim
=
y_dim
def
build_network
(
self
):
self
.
x
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
x_dim
],
name
=
'input_x'
)
self
.
y
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
y_dim
],
name
=
'input_y'
)
self
.
keep_prob
=
tf
.
placeholder
(
tf
.
float32
,
name
=
'keep_prob'
)
with
tf
.
name_scope
(
'reshape'
):
try
:
input_dim
=
int
(
math
.
sqrt
(
self
.
x_dim
))
except
:
logger
.
debug
(
'input dim cannot be sqrt and reshape. input dim: '
+
str
(
self
.
x_dim
))
raise
x_image
=
tf
.
reshape
(
self
.
x
,
[
-
1
,
input_dim
,
input_dim
,
1
])
with
tf
.
name_scope
(
'conv1'
):
W_conv1
=
weight_variable
([
self
.
conv_size
,
self
.
conv_size
,
1
,
self
.
channel_1_num
])
b_conv1
=
bias_variable
([
self
.
channel_1_num
])
h_conv1
=
nni
.
function_choice
(
lambda
:
tf
.
nn
.
relu
(
conv2d
(
x_image
,
W_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
sigmoid
(
conv2d
(
x_image
,
W_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
tanh
(
conv2d
(
x_image
,
W_conv1
)
+
b_conv1
),
name
=
'tf.nn.relu'
)
with
tf
.
name_scope
(
'pool1'
):
h_pool1
=
nni
.
function_choice
(
lambda
:
max_pool
(
h_conv1
,
self
.
pool_size
),
lambda
:
avg_pool
(
h_conv1
,
self
.
pool_size
),
name
=
'max_pool'
)
with
tf
.
name_scope
(
'conv2'
):
W_conv2
=
weight_variable
([
self
.
conv_size
,
self
.
conv_size
,
self
.
channel_1_num
,
self
.
channel_2_num
])
b_conv2
=
bias_variable
([
self
.
channel_2_num
])
h_conv2
=
tf
.
nn
.
relu
(
conv2d
(
h_pool1
,
W_conv2
)
+
b_conv2
)
with
tf
.
name_scope
(
'pool2'
):
h_pool2
=
max_pool
(
h_conv2
,
self
.
pool_size
)
last_dim
=
int
(
input_dim
/
(
self
.
pool_size
*
self
.
pool_size
))
with
tf
.
name_scope
(
'fc1'
):
W_fc1
=
weight_variable
([
last_dim
*
last_dim
*
self
.
channel_2_num
,
self
.
hidden_size
])
b_fc1
=
bias_variable
([
self
.
hidden_size
])
h_pool2_flat
=
tf
.
reshape
(
h_pool2
,
[
-
1
,
last_dim
*
last_dim
*
self
.
channel_2_num
])
h_fc1
=
tf
.
nn
.
relu
(
tf
.
matmul
(
h_pool2_flat
,
W_fc1
)
+
b_fc1
)
with
tf
.
name_scope
(
'dropout'
):
h_fc1_drop
=
tf
.
nn
.
dropout
(
h_fc1
,
self
.
keep_prob
)
with
tf
.
name_scope
(
'fc2'
):
W_fc2
=
weight_variable
([
self
.
hidden_size
,
self
.
y_dim
])
b_fc2
=
bias_variable
([
self
.
y_dim
])
y_conv
=
tf
.
matmul
(
h_fc1_drop
,
W_fc2
)
+
b_fc2
with
tf
.
name_scope
(
'loss'
):
cross_entropy
=
tf
.
reduce_mean
(
tf
.
nn
.
softmax_cross_entropy_with_logits
(
labels
=
self
.
y
,
logits
=
y_conv
)
)
with
tf
.
name_scope
(
'adam_optimizer'
):
self
.
train_step
=
tf
.
train
.
AdamOptimizer
(
self
.
learning_rate
).
minimize
(
cross_entropy
)
with
tf
.
name_scope
(
'accuracy'
):
correct_prediction
=
tf
.
equal
(
tf
.
argmax
(
y_conv
,
1
),
tf
.
argmax
(
self
.
y
,
1
))
self
.
accuracy
=
tf
.
reduce_mean
(
tf
.
cast
(
correct_prediction
,
tf
.
float32
))
return
def
conv2d
(
x
,
W
):
"""conv2d returns a 2d convolution layer with full stride."""
return
tf
.
nn
.
conv2d
(
x
,
W
,
strides
=
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
def
max_pool
(
x
,
pool_size
):
"""max_pool downsamples a feature map by 2X."""
return
tf
.
nn
.
max_pool
(
x
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
avg_pool
(
x
,
pool_size
):
return
tf
.
nn
.
avg_pool
(
x
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
weight_variable
(
shape
):
"""weight_variable generates a weight variable of a given shape."""
initial
=
tf
.
truncated_normal
(
shape
,
stddev
=
0.1
)
return
tf
.
Variable
(
initial
)
def
bias_variable
(
shape
):
"""bias_variable generates a bias variable of a given shape."""
initial
=
tf
.
constant
(
0.1
,
shape
=
shape
)
return
tf
.
Variable
(
initial
)
def
main
():
data_dir
=
'/tmp/tensorflow/mnist/input_data'
mnist
=
input_data
.
read_data_sets
(
data_dir
,
one_hot
=
True
)
logger
.
debug
(
'Mnist download data down.'
)
mnist_network
=
MnistNetwork
()
mnist_network
.
build_network
()
logger
.
debug
(
'Mnist build network done.'
)
graph_location
=
tempfile
.
mkdtemp
()
logger
.
debug
(
'Saving graph to: %s'
%
graph_location
)
train_writer
=
tf
.
summary
.
FileWriter
(
graph_location
)
train_writer
.
add_graph
(
tf
.
get_default_graph
())
test_acc
=
0.0
with
tf
.
Session
()
as
sess
:
sess
.
run
(
tf
.
global_variables_initializer
())
batch_num
=
200
for
i
in
range
(
batch_num
):
batch_size
=
nni
.
choice
(
50
,
250
,
500
,
name
=
'batch_size'
)
batch
=
mnist
.
train
.
next_batch
(
batch_size
)
dropout_rate
=
nni
.
choice
(
1
,
5
,
name
=
'dropout_rate'
)
mnist_network
.
train_step
.
run
(
feed_dict
=
{
mnist_network
.
x
:
batch
[
0
],
mnist_network
.
y
:
batch
[
1
],
mnist_network
.
keep_prob
:
dropout_rate
})
if
i
%
100
==
0
:
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
x
:
mnist
.
test
.
images
,
mnist_network
.
y
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_intermediate_result
(
test_acc
)
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
x
:
mnist
.
test
.
images
,
mnist_network
.
y
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_final_result
(
test_acc
)
def
generate_default_params
():
params
=
{
'data_dir'
:
'/tmp/tensorflow/mnist/input_data'
,
'dropout_rate'
:
0.5
,
'channel_1_num'
:
32
,
'channel_2_num'
:
64
,
'conv_size'
:
5
,
'pool_size'
:
2
,
'hidden_size'
:
1024
,
'batch_size'
:
50
,
'batch_num'
:
200
,
'learning_rate'
:
0.0001
}
return
params
if
__name__
==
'__main__'
:
try
:
params
=
generate_default_params
()
logger
.
debug
(
'params'
)
logger
.
debug
(
'params update'
)
main
()
except
:
logger
.
exception
(
'Got some exception in while loop in mnist.py'
)
raise
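This generated file is what the annotation tool produces from testcase/usercode/mnist.py further down: every '''@nni.variable(...)''' and '''@nni.function_choice(...)''' docstring annotation has been replaced by a direct nni.* call carrying a matching name. As a rough mental model only (a sketch, not the actual NNI SDK), such a call can be read as a lookup of whatever the tuner sampled under that name:

# Hypothetical sketch of what an expanded nni.choice call does at trial
# runtime; the real SDK differs, this only illustrates the name lookup.
_sampled_params = {'self.conv_size': 1}   # index chosen by the tuner

def choice(*candidates, name=None):
    idx = _sampled_params.get(name, 0)    # fall back to the first candidate
    return candidates[idx]

assert choice(2, 3, 5, 7, name='self.conv_size') == 3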
tools/nni_annotation/testcase/searchspace.json  0 → 100644
{
    "handwrite/conv_size/choice": {"_type": "choice", "_value": [0, 1, 2, 3]},
    "handwrite/#5/function_choice": {"_type": "choice", "_value": [0, 1, 2]},
    "handwrite/#8/qlognormal": {"_type": "qlognormal", "_value": [1.2, 3, 4.5]},
    "handwrite/#13/choice": {"_type": "choice", "_value": [0, 1]},
    "mnist/self.conv_size/choice": {"_type": "choice", "_value": [0, 1, 2, 3]},
    "mnist/self.hidden_size/choice": {"_type": "choice", "_value": [0, 1, 2]},
    "mnist/self.learning_rate/randint": {"_type": "randint", "_value": [2, 3, 5]},
    "mnist/tf.nn.relu/function_choice": {"_type": "choice", "_value": [0, 1, 2]},
    "mnist/max_pool/function_choice": {"_type": "choice", "_value": [0, 1]},
    "mnist/batch_size/choice": {"_type": "choice", "_value": [0, 1, 2]},
    "mnist/dropout_rate/choice": {"_type": "choice", "_value": [0, 1]},
    "dir.simple/conv_size/choice": {"_type": "choice", "_value": [0, 1, 2, 3]},
    "dir.simple/max_pool/function_choice": {"_type": "choice", "_value": [0, 1]}
}
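Note the encoding convention in this search space: for choice and function_choice entries, _value holds candidate indices rather than the candidate values themselves (e.g. [0, 1, 2, 3] for the four candidates 2, 3, 5, 7), while numeric distributions such as qlognormal keep their parameters verbatim. Decoding a sampled index back to a concrete value is then a plain list lookup (names below are illustrative):

entry = {'_type': 'choice', '_value': [0, 1, 2, 3]}  # from searchspace.json
candidates = [2, 3, 5, 7]                            # from the annotation

sampled_index = 2                      # suppose the tuner picked index 2
assert sampled_index in entry['_value']
conv_size = candidates[sampled_index]  # -> 5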
tools/nni_annotation/testcase/usercode/dir/simple.py  0 → 100644
def max_pool(k):
    pass


h_conv1 = 1
"""@nni.variable(nni.choice(2,3,5,7),name=conv_size)"""
conv_size = 5
"""@nni.function_choice(max_pool(h_conv1),avg_pool(h_conv2,h_conv3),name=max_pool)"""
h_pool1 = max_pool(h_conv1)
test_acc = 1
'''@nni.report_intermediate_result(test_acc)'''
test_acc = 2
'''@nni.report_final_result(test_acc)'''
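Comparing this input with testcase/annotated/dir/simple.py above makes the expansion rule concrete: a string annotation rewrites the single statement that follows it, and the assigned default is dropped in favor of the nni.* call. Taken directly from the two files:

# before expansion (usercode/dir/simple.py):
"""@nni.variable(nni.choice(2,3,5,7),name=conv_size)"""
conv_size = 5

# after expansion (annotated/dir/simple.py):
conv_size = nni.choice(2, 3, 5, 7, name='conv_size')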
tools/nni_annotation/testcase/usercode/mnist.py  0 → 100644
# -*- encoding:utf8 -*-
"""A deep MNIST classifier using convolutional layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import logging
import math
import tempfile
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

logger = logging.getLogger('mnist')
FLAGS = None


class MnistNetwork(object):

    def __init__(self, channel_1_num=32, channel_2_num=64, conv_size=5, hidden_size=1024,
                 pool_size=2, learning_rate=0.0001, x_dim=784, y_dim=10):
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        '''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''
        self.conv_size = conv_size
        '''@nni.variable(nni.choice(124,512,1024),name=self.hidden_size)'''
        self.hidden_size = hidden_size
        self.pool_size = pool_size
        '''@nni.variable(nni.randint(2,3,5),name=self.learning_rate)'''
        self.learning_rate = learning_rate
        self.x_dim = x_dim
        self.y_dim = y_dim

    def build_network(self):
        self.x = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.y = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except:
                #print('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                logger.debug('input dim cannot be sqrt and reshape. input dim: ' + str(self.x_dim))
                raise
            x_image = tf.reshape(self.x, [-1, input_dim, input_dim, 1])

        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            W_conv1 = weight_variable([self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            """@nni.function_choice(tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1),tf.nn.sigmoid(conv2d(x_image, W_conv1) + b_conv1),tf.nn.tanh(conv2d(x_image, W_conv1) + b_conv1),name=tf.nn.relu)"""
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)

        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            """@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)"""
            h_pool1 = max_pool(h_conv1, self.pool_size)

        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            W_conv2 = weight_variable([self.conv_size, self.conv_size, self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)

        # Second pooling layer.
        with tf.name_scope('pool2'):
            #"""@nni.dynamic(input={cnn_block:1, concat:2},function_choice={"cnn_block":(x,nni.choice([3,4])),"cnn_block":(x),"concat":(x,y)},limit={"cnn_block.input":[concat,input],"concat.input":[this.depth-1,this.depth-3,this.depth-5],"graph.width":[1]})"""
            h_pool2 = max_pool(h_conv2, self.pool_size)

        # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- maps this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            W_fc1 = weight_variable([last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Map the 1024 features to 10 classes, one for each digit
        with tf.name_scope('fc2'):
            W_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y, logits=y_conv))
        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)
        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(self.y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return


def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool(x, pool_size):
    """max_pool downsamples a feature map by 2X."""
    return tf.nn.max_pool(x, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def avg_pool(x, pool_size):
    return tf.nn.avg_pool(x, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


def main():
    # Import data
    data_dir = '/tmp/tensorflow/mnist/input_data'
    mnist = input_data.read_data_sets(data_dir, one_hot=True)
    logger.debug('Mnist download data down.')

    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork()
    mnist_network.build_network()
    logger.debug('Mnist build network done.')

    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s' % graph_location)
    # print('Saving graph to: %s' % graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch_num = 200
        for i in range(batch_num):
            '''@nni.variable(nni.choice(50,250,500),name=batch_size)'''
            batch_size = 50
            batch = mnist.train.next_batch(batch_size)
            '''@nni.variable(nni.choice(1,5),name=dropout_rate)'''
            dropout_rate = 0.5
            mnist_network.train_step.run(feed_dict={mnist_network.x: batch[0],
                                                    mnist_network.y: batch[1],
                                                    mnist_network.keep_prob: dropout_rate})
            if i % 100 == 0:
                #train_accuracy = mnist_network.accuracy.eval(feed_dict={
                #    mnist_network.x: batch[0], mnist_network.y: batch[1], mnist_network.keep_prob: params['dropout_rate']})
                #print('step %d, training accuracy %g' % (i, train_accuracy))
                test_acc = mnist_network.accuracy.eval(feed_dict={mnist_network.x: mnist.test.images,
                                                                  mnist_network.y: mnist.test.labels,
                                                                  mnist_network.keep_prob: 1.0})
                '''@nni.report_intermediate_result(test_acc)'''
        test_acc = mnist_network.accuracy.eval(feed_dict={mnist_network.x: mnist.test.images,
                                                          mnist_network.y: mnist.test.labels,
                                                          mnist_network.keep_prob: 1.0})
        '''@nni.report_final_result(test_acc)'''


def generate_default_params():
    params = {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'dropout_rate': 0.5,
        'channel_1_num': 32,
        'channel_2_num': 64,
        'conv_size': 5,
        'pool_size': 2,
        'hidden_size': 1024,
        'batch_size': 50,
        'batch_num': 200,
        'learning_rate': 1e-4}
    return params


if __name__ == '__main__':
    # run command: python mnist.py --init_file_path ./init.json
    #FLAGS, unparsed = parse_command()
    #original_params = parse_init_json(FLAGS.init_file_path, {})
    #pipe_interface.set_params_to_env()
    try:
        params = generate_default_params()
        logger.debug('params')
        logger.debug('params update')
        main()
    except:
        logger.exception('Got some exception in while loop in mnist.py')
        raise
tools/nni_annotation/testcase/usercode/nonpy.txt  0 → 100644
hello
tools/nnicmd/config_utils.py
@@ -33,7 +33,7 @@ class Config:
     def get_all_config(self):
         '''get all of config values'''
-        return json.dumps(self.config)
+        return json.dumps(self.config, indent=4, sort_keys=True, separators=(',', ':'))

     def set_config(self, key, value):
         '''set {key:value} pairs to self.config'''
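The get_all_config change trades the compact single-line dump for a stable, readable one: indent=4 pretty-prints, sort_keys=True makes the output deterministic, and separators=(',', ':') drops the space after colons. A plain-stdlib illustration (the config values here are made up):

import json

config = {'restServerPid': 1234, 'experimentConfig': {'trialConcurrency': 1}}
print(json.dumps(config, indent=4, sort_keys=True, separators=(',', ':')))
# {
#     "experimentConfig":{
#         "trialConcurrency":1
#     },
#     "restServerPid":1234
# }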
tools/nnicmd/launcher.py
@@ -22,9 +22,9 @@
 import json
 import os
 import shutil
-from subprocess import Popen, PIPE
+from subprocess import Popen, PIPE, call
 import tempfile
-from annotation import *
+from nni_annotation import *
 from .launcher_utils import validate_all_content
 from .rest_utils import rest_put, rest_post, check_rest_server, check_rest_server_quick
 from .url_utils import cluster_metadata_url, experiment_url
@@ -33,7 +33,7 @@ from .common_utils import get_yml_content, get_json_content, print_error, print_
 from .constants import EXPERIMENT_SUCCESS_INFO, STDOUT_FULL_PATH, STDERR_FULL_PATH, LOG_DIR, REST_PORT, ERROR_INFO, NORMAL_INFO
 from .webui_utils import start_web_ui, check_web_ui

-def start_rest_server(manager, port, platform, mode, experiment_id=None):
+def start_rest_server(port, platform, mode, experiment_id=None):
     '''Run nni manager process'''
     print_normal('Checking experiment...')
     nni_config = Config()
@@ -44,6 +44,7 @@ def start_rest_server(manager, port, platform, mode, experiment_id=None):
         exit(0)
     print_normal('Starting restful server...')
+    manager = os.environ.get('NNI_MANAGER', 'nnimanager')
     cmds = [manager, '--port', str(port), '--mode', platform, '--start_mode', mode]
     if mode == 'resume':
         cmds += ['--experiment_id', experiment_id]
@@ -58,9 +59,9 @@ def set_trial_config(experiment_config, port):
     '''set trial configuration'''
     request_data = dict()
     value_dict = dict()
-    value_dict['command'] = experiment_config['trial']['trialCommand']
-    value_dict['codeDir'] = experiment_config['trial']['trialCodeDir']
-    value_dict['gpuNum'] = experiment_config['trial']['trialGpuNum']
+    value_dict['command'] = experiment_config['trial']['command']
+    value_dict['codeDir'] = experiment_config['trial']['codeDir']
+    value_dict['gpuNum'] = experiment_config['trial']['gpuNum']
     request_data['trial_config'] = value_dict
     response = rest_put(cluster_metadata_url(port), json.dumps(request_data), 20)
     return True if response.status_code == 200 else False
@@ -75,11 +76,14 @@ def set_remote_config(experiment_config, port):
     request_data = dict()
     request_data['machine_list'] = experiment_config['machineList']
     response = rest_put(cluster_metadata_url(port), json.dumps(request_data), 20)
+    err_message = ''
     if not response or not response.status_code == 200:
-        return False
+        if response is not None:
+            err_message = response.text
+        return False, err_message
     #set trial_config
-    return set_trial_config(experiment_config, port)
+    return set_trial_config(experiment_config, port), err_message

 def set_experiment(experiment_config, mode, port):
     '''Call startExperiment (rest POST /experiment) with yaml file content'''
@@ -89,7 +93,7 @@ def set_experiment(experiment_config, mode, port):
     request_data['trialConcurrency'] = experiment_config['trialConcurrency']
     request_data['maxExecDuration'] = experiment_config['maxExecDuration']
     request_data['maxTrialNum'] = experiment_config['maxTrialNum']
-    request_data['searchSpace'] = experiment_config['searchSpace']
+    request_data['searchSpace'] = experiment_config.get('searchSpace')
     request_data['tuner'] = experiment_config['tuner']
     if 'assessor' in experiment_config:
         request_data['assessor'] = experiment_config['assessor']
@@ -97,16 +101,16 @@ def set_experiment(experiment_config, mode, port):
     request_data['clusterMetaData'] = []
     if experiment_config['trainingServicePlatform'] == 'local':
         request_data['clusterMetaData'].append(
-            {'key': 'codeDir', 'value': experiment_config['trial']['trialCodeDir']})
+            {'key': 'codeDir', 'value': experiment_config['trial']['codeDir']})
         request_data['clusterMetaData'].append(
-            {'key': 'command', 'value': experiment_config['trial']['trialCommand']})
+            {'key': 'command', 'value': experiment_config['trial']['command']})
     else:
         request_data['clusterMetaData'].append(
             {'key': 'machine_list', 'value': experiment_config['machineList']})
         value_dict = dict()
-        value_dict['command'] = experiment_config['trial']['trialCommand']
-        value_dict['codeDir'] = experiment_config['trial']['trialCodeDir']
-        value_dict['gpuNum'] = experiment_config['trial']['trialGpuNum']
+        value_dict['command'] = experiment_config['trial']['command']
+        value_dict['codeDir'] = experiment_config['trial']['codeDir']
+        value_dict['gpuNum'] = experiment_config['trial']['gpuNum']
         request_data['clusterMetaData'].append(
             {'key': 'trial_config', 'value': value_dict})
@@ -117,23 +121,24 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
     '''follow steps to start rest server and start experiment'''
     nni_config = Config()
     # start rest server
-    rest_process = start_rest_server(args.manager, REST_PORT, experiment_config['trainingServicePlatform'], mode, experiment_id)
+    rest_process = start_rest_server(REST_PORT, experiment_config['trainingServicePlatform'], mode, experiment_id)
     nni_config.set_config('restServerPid', rest_process.pid)
     # Deal with annotation
     if experiment_config.get('useAnnotation'):
         path = os.path.join(tempfile.gettempdir(), 'nni', 'annotation')
         if os.path.isdir(path):
             shutil.rmtree(path)
         os.makedirs(path)
-        expand_annotations(experiment_config['trial']['trialCodeDir'], path)
-        experiment_config['trial']['trialCodeDir'] = path
-        search_space = generate_search_space(experiment_config['trial']['trialCodeDir'])
+        expand_annotations(experiment_config['trial']['codeDir'], path)
+        experiment_config['trial']['codeDir'] = path
+        search_space = generate_search_space(experiment_config['trial']['codeDir'])
         experiment_config['searchSpace'] = json.dumps(search_space)
         assert search_space, ERROR_INFO % 'Generated search space is empty'
+    elif experiment_config.get('searchSpacePath'):
+        search_space = get_json_content(experiment_config.get('searchSpacePath'))
+        experiment_config['searchSpace'] = json.dumps(search_space)
     else:
-        search_space = get_json_content(experiment_config['searchSpacePath'])
-        experiment_config['searchSpace'] = json.dumps(search_space)
+        experiment_config['searchSpace'] = json.dumps('')

     # check rest server
     print_normal('Checking restful server...')
@@ -142,7 +147,8 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
     else:
         print_error('Restful server start failed!')
         try:
-            rest_process.kill()
+            cmds = ['pkill', '-P', str(rest_process.pid)]
+            call(cmds)
         except Exception:
             raise Exception(ERROR_INFO % 'Rest server stopped!')
         exit(0)
@@ -150,12 +156,14 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
     # set remote config
     if experiment_config['trainingServicePlatform'] == 'remote':
         print_normal('Setting remote config...')
-        if set_remote_config(experiment_config, REST_PORT):
+        config_result, err_msg = set_remote_config(experiment_config, REST_PORT)
+        if config_result:
             print_normal('Success!')
         else:
-            print_error('Failed!')
+            print_error('Failed! Error is: {}'.format(err_msg))
             try:
-                rest_process.kill()
+                cmds = ['pkill', '-P', str(rest_process.pid)]
+                call(cmds)
             except Exception:
                 raise Exception(ERROR_INFO % 'Rest server stopped!')
             exit(0)
@@ -168,7 +176,8 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
     else:
         print_error('Failed!')
         try:
-            rest_process.kill()
+            cmds = ['pkill', '-P', str(rest_process.pid)]
+            call(cmds)
         except Exception:
             raise Exception(ERROR_INFO % 'Rest server stopped!')
         exit(0)
@@ -183,7 +192,8 @@ def launch_experiment(args, experiment_config, mode, webuiport, experiment_id=No
     else:
         print_error('Failed!')
         try:
-            rest_process.kill()
+            cmds = ['pkill', '-P', str(rest_process.pid)]
+            call(cmds)
         except Exception:
             raise Exception(ERROR_INFO % 'Rest server stopped!')
         exit(0)
@@ -213,9 +223,9 @@ def resume_experiment(args):
 def create_experiment(args):
     '''start a new experiment'''
     nni_config = Config()
-    experiment_config = get_yml_content(args.config)
-    validate_all_content(experiment_config)
+    config_path = os.path.abspath(args.config)
+    experiment_config = get_yml_content(config_path)
+    validate_all_content(experiment_config, config_path)
     nni_config.set_config('experimentConfig', experiment_config)
     launch_experiment(args, experiment_config, 'new', args.webuiport)
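A pattern repeated across the hunks above: rest_process.kill() signals only the launched process itself, while the new pkill -P <pid> cleanup reaches the children spawned under it, which is what actually stops the REST server tree. A small sketch of the same pattern (illustrative helper, not from the repo):

import subprocess

def stop_children(proc):
    # pkill -P <pid> signals the direct children of <pid>, mirroring the
    # cleanup path that replaced proc.kill() in launcher.py.
    subprocess.call(['pkill', '-P', str(proc.pid)])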