OpenDAS/nni commit 63aa63bd (unverified)
Authored Jan 14, 2019 by xuehui; committed via GitHub, Jan 14, 2019

Remove unused example (#600)

* update README in metis and update RuntimeError
* remove smart params
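For context, the removed example exercised NNI's inline "smart parameter" (annotation) API: tunable values are requested directly in the trial code through calls such as `nni.choice` and `nni.uniform`, instead of being declared in a separate search-space file. A minimal sketch of that style, using only the calls that appear in the deleted mnist.py below (the concrete values here are illustrative):

```python
import nni

# Each call marks a tunable value in place; the tuner supplies concrete
# values when the trial runs under NNI with annotations enabled.
learning_rate = nni.uniform(0.0001, 0.1, name='learning_rate')  # continuous range
batch_size = nni.choice(16, 32, 64, name='batch_size')          # discrete set

# ... train a model with learning_rate and batch_size, then report the metric:
accuracy = 0.0  # placeholder; a real trial would compute this
nni.report_final_result(accuracy)
```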
Parent: a18b26f2
Showing 4 changed files with 0 additions and 316 deletions:

- examples/trials/mnist-smartparam/config.yml (+0 -20)
- examples/trials/mnist-smartparam/config_kubeflow.yml (+0 -31)
- examples/trials/mnist-smartparam/config_pai.yml (+0 -35)
- examples/trials/mnist-smartparam/mnist.py (+0 -230)
examples/trials/mnist-smartparam/config.yml (deleted, 100644 → 0)
```yaml
authorName: default
experimentName: example_mnist-smartparam
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: local
#choice: true, false
useAnnotation: true
tuner:
  #choice: TPE, Random, Anneal, Evolution, BatchTuner
  #SMAC (SMAC should be installed through nnictl)
  builtinTunerName: TPE
  classArgs:
    #choice: maximize, minimize
    optimize_mode: maximize
trial:
  command: python3 mnist.py
  codeDir: .
  gpuNum: 0
```
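Note that `useAnnotation: true` is what ties this config to the smart parameters in mnist.py: NNI parses the `nni.choice`/`nni.uniform` calls out of the trial code rather than reading a search-space file. The surviving mnist example uses the file-based style instead; the config-level difference is roughly the following fragment (a sketch, assuming the classic NNI v1 config schema with its `searchSpacePath` key):

```yaml
# File-based alternative to annotations (hypothetical fragment):
useAnnotation: false
searchSpacePath: search_space.json   # JSON file declaring the tunable parameters
```

Either way, the experiment would have been launched with `nnictl create --config config.yml`.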
examples/trials/mnist-smartparam/config_kubeflow.yml (deleted, 100644 → 0)
```yaml
authorName: default
experimentName: example_dist
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai, kubeflow
trainingServicePlatform: kubeflow
#choice: true, false
useAnnotation: true
tuner:
  #choice: TPE, Random, Anneal, Evolution
  builtinTunerName: TPE
  classArgs:
    #choice: maximize, minimize
    optimize_mode: maximize
trial:
  codeDir: .
  worker:
    replicas: 1
    command: python3 mnist.py
    gpuNum: 0
    cpuNum: 1
    memoryMB: 8192
    image: msranni/nni:latest
kubeflowConfig:
  operator: tf-operator
  apiVersion: v1alpha2
  storage: nfs
  nfs:
    server: 10.10.10.10
    path: /var/nfs/general
```
\ No newline at end of file
examples/trials/mnist-smartparam/config_pai.yml (deleted, 100644 → 0)
```yaml
authorName: default
experimentName: example_mnist-smartparam
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 10
#choice: local, remote, pai
trainingServicePlatform: pai
#choice: true, false
useAnnotation: true
tuner:
  #choice: TPE, Random, Anneal, Evolution, BatchTuner
  #SMAC (SMAC should be installed through nnictl)
  builtinTunerName: TPE
  classArgs:
    #choice: maximize, minimize
    optimize_mode: maximize
trial:
  command: python3 mnist.py
  codeDir: .
  gpuNum: 0
  cpuNum: 1
  memoryMB: 8196
  #The docker image to run nni job on pai
  image: msranni/nni:latest
  #The hdfs directory to store data on pai, format 'hdfs://host:port/directory'
  dataDir: hdfs://10.10.10.10:9000/username/nni
  #The hdfs directory to store output data generated by nni, format 'hdfs://host:port/directory'
  outputDir: hdfs://10.10.10.10:9000/username/nni
paiConfig:
  #The username to login pai
  userName: username
  #The password to login pai
  passWord: password
  #The host of restful server of pai
  host: 10.10.10.10
```
\ No newline at end of file
examples/trials/mnist-smartparam/mnist.py (deleted, 100644 → 0)
"""A deep MNIST classifier using convolutional layers."""
import
logging
import
math
import
tempfile
import
tensorflow
as
tf
from
tensorflow.examples.tutorials.mnist
import
input_data
import
nni
FLAGS
=
None
logger
=
logging
.
getLogger
(
'mnist_AutoML'
)
class
MnistNetwork
(
object
):
'''
MnistNetwork is for initlizing and building basic network for mnist.
'''
def
__init__
(
self
,
channel_1_num
,
channel_2_num
,
pool_size
,
x_dim
=
784
,
y_dim
=
10
):
self
.
channel_1_num
=
channel_1_num
self
.
channel_2_num
=
channel_2_num
self
.
conv_size
=
nni
.
choice
(
2
,
3
,
5
,
7
,
name
=
'conv-size'
)
self
.
hidden_size
=
nni
.
choice
(
124
,
512
,
1024
)
# example: without name
self
.
pool_size
=
pool_size
self
.
learning_rate
=
nni
.
uniform
(
0.0001
,
0.1
,
name
=
'learning_rate'
)
self
.
x_dim
=
x_dim
self
.
y_dim
=
y_dim
self
.
images
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
x_dim
],
name
=
'input_x'
)
self
.
labels
=
tf
.
placeholder
(
tf
.
float32
,
[
None
,
self
.
y_dim
],
name
=
'input_y'
)
self
.
keep_prob
=
tf
.
placeholder
(
tf
.
float32
,
name
=
'keep_prob'
)
self
.
train_step
=
None
self
.
accuracy
=
None
def
build_network
(
self
):
'''
Building network for mnist
'''
# Reshape to use within a convolutional neural net.
# Last dimension is for "features" - there is only one here, since images are
# grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
with
tf
.
name_scope
(
'reshape'
):
try
:
input_dim
=
int
(
math
.
sqrt
(
self
.
x_dim
))
except
:
print
(
'input dim cannot be sqrt and reshape. input dim: '
+
str
(
self
.
x_dim
))
logger
.
debug
(
'input dim cannot be sqrt and reshape. input dim: %s'
,
str
(
self
.
x_dim
))
raise
x_image
=
tf
.
reshape
(
self
.
images
,
[
-
1
,
input_dim
,
input_dim
,
1
])
# First convolutional layer - maps one grayscale image to 32 feature maps.
with
tf
.
name_scope
(
'conv1'
):
w_conv1
=
weight_variable
(
[
self
.
conv_size
,
self
.
conv_size
,
1
,
self
.
channel_1_num
])
b_conv1
=
bias_variable
([
self
.
channel_1_num
])
h_conv1
=
nni
.
function_choice
(
lambda
:
tf
.
nn
.
relu
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
sigmoid
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
),
lambda
:
tf
.
nn
.
tanh
(
conv2d
(
x_image
,
w_conv1
)
+
b_conv1
)
)
# example: without name
# Pooling layer - downsamples by 2X.
with
tf
.
name_scope
(
'pool1'
):
h_pool1
=
max_pool
(
h_conv1
,
self
.
pool_size
)
h_pool1
=
nni
.
function_choice
(
lambda
:
max_pool
(
h_conv1
,
self
.
pool_size
),
lambda
:
avg_pool
(
h_conv1
,
self
.
pool_size
),
name
=
'h_pool1'
)
# Second convolutional layer -- maps 32 feature maps to 64.
with
tf
.
name_scope
(
'conv2'
):
w_conv2
=
weight_variable
([
self
.
conv_size
,
self
.
conv_size
,
self
.
channel_1_num
,
self
.
channel_2_num
])
b_conv2
=
bias_variable
([
self
.
channel_2_num
])
h_conv2
=
tf
.
nn
.
relu
(
conv2d
(
h_pool1
,
w_conv2
)
+
b_conv2
)
# Second pooling layer.
with
tf
.
name_scope
(
'pool2'
):
# example: another style
h_pool2
=
max_pool
(
h_conv2
,
self
.
pool_size
)
# Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
# is down to 7x7x64 feature maps -- maps this to 1024 features.
last_dim
=
int
(
input_dim
/
(
self
.
pool_size
*
self
.
pool_size
))
with
tf
.
name_scope
(
'fc1'
):
w_fc1
=
weight_variable
(
[
last_dim
*
last_dim
*
self
.
channel_2_num
,
self
.
hidden_size
])
b_fc1
=
bias_variable
([
self
.
hidden_size
])
h_pool2_flat
=
tf
.
reshape
(
h_pool2
,
[
-
1
,
last_dim
*
last_dim
*
self
.
channel_2_num
])
h_fc1
=
tf
.
nn
.
relu
(
tf
.
matmul
(
h_pool2_flat
,
w_fc1
)
+
b_fc1
)
# Dropout - controls the complexity of the model, prevents co-adaptation of features.
with
tf
.
name_scope
(
'dropout'
):
h_fc1_drop
=
tf
.
nn
.
dropout
(
h_fc1
,
self
.
keep_prob
)
# Map the 1024 features to 10 classes, one for each digit
with
tf
.
name_scope
(
'fc2'
):
w_fc2
=
weight_variable
([
self
.
hidden_size
,
self
.
y_dim
])
b_fc2
=
bias_variable
([
self
.
y_dim
])
y_conv
=
tf
.
matmul
(
h_fc1_drop
,
w_fc2
)
+
b_fc2
with
tf
.
name_scope
(
'loss'
):
cross_entropy
=
tf
.
reduce_mean
(
tf
.
nn
.
softmax_cross_entropy_with_logits
(
labels
=
self
.
labels
,
logits
=
y_conv
))
with
tf
.
name_scope
(
'adam_optimizer'
):
self
.
train_step
=
tf
.
train
.
AdamOptimizer
(
self
.
learning_rate
).
minimize
(
cross_entropy
)
with
tf
.
name_scope
(
'accuracy'
):
correct_prediction
=
tf
.
equal
(
tf
.
argmax
(
y_conv
,
1
),
tf
.
argmax
(
self
.
labels
,
1
))
self
.
accuracy
=
tf
.
reduce_mean
(
tf
.
cast
(
correct_prediction
,
tf
.
float32
))
def
conv2d
(
x_input
,
w_matrix
):
"""conv2d returns a 2d convolution layer with full stride."""
return
tf
.
nn
.
conv2d
(
x_input
,
w_matrix
,
strides
=
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
def
max_pool
(
x_input
,
pool_size
):
"""max_pool downsamples a feature map by 2X."""
return
tf
.
nn
.
max_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
avg_pool
(
x_input
,
pool_size
):
return
tf
.
nn
.
avg_pool
(
x_input
,
ksize
=
[
1
,
pool_size
,
pool_size
,
1
],
strides
=
[
1
,
pool_size
,
pool_size
,
1
],
padding
=
'SAME'
)
def
weight_variable
(
shape
):
"""weight_variable generates a weight variable of a given shape."""
initial
=
tf
.
truncated_normal
(
shape
,
stddev
=
0.1
)
return
tf
.
Variable
(
initial
)
def
bias_variable
(
shape
):
"""bias_variable generates a bias variable of a given shape."""
initial
=
tf
.
constant
(
0.1
,
shape
=
shape
)
return
tf
.
Variable
(
initial
)
def
main
(
params
):
'''
Main function, build mnist network, run and send result to NNI.
'''
# Import data
mnist
=
input_data
.
read_data_sets
(
params
[
'data_dir'
],
one_hot
=
True
)
print
(
'Mnist download data down.'
)
logger
.
debug
(
'Mnist download data down.'
)
# Create the model
# Build the graph for the deep net
mnist_network
=
MnistNetwork
(
channel_1_num
=
params
[
'channel_1_num'
],
channel_2_num
=
params
[
'channel_2_num'
],
pool_size
=
params
[
'pool_size'
])
mnist_network
.
build_network
()
logger
.
debug
(
'Mnist build network done.'
)
# Write log
graph_location
=
tempfile
.
mkdtemp
()
logger
.
debug
(
'Saving graph to: %s'
,
graph_location
)
train_writer
=
tf
.
summary
.
FileWriter
(
graph_location
)
train_writer
.
add_graph
(
tf
.
get_default_graph
())
test_acc
=
0.0
with
tf
.
Session
()
as
sess
:
sess
.
run
(
tf
.
global_variables_initializer
())
batch_size
=
nni
.
choice
(
1
,
4
,
8
,
16
,
32
,
name
=
'batch_size'
)
for
i
in
range
(
2000
):
batch
=
mnist
.
train
.
next_batch
(
batch_size
)
dropout_rate
=
nni
.
choice
(
0.5
,
0.9
,
name
=
'dropout_rate'
)
mnist_network
.
train_step
.
run
(
feed_dict
=
{
mnist_network
.
images
:
batch
[
0
],
mnist_network
.
labels
:
batch
[
1
],
mnist_network
.
keep_prob
:
1
-
dropout_rate
}
)
if
i
%
100
==
0
:
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_intermediate_result
(
test_acc
)
logger
.
debug
(
'test accuracy %g'
,
test_acc
)
logger
.
debug
(
'Pipe send intermediate result done.'
)
test_acc
=
mnist_network
.
accuracy
.
eval
(
feed_dict
=
{
mnist_network
.
images
:
mnist
.
test
.
images
,
mnist_network
.
labels
:
mnist
.
test
.
labels
,
mnist_network
.
keep_prob
:
1.0
})
nni
.
report_final_result
(
test_acc
)
logger
.
debug
(
'Final result is %g'
,
test_acc
)
logger
.
debug
(
'Send final result done.'
)
def
generate_defualt_params
():
'''
Generate default parameters for mnist network.
'''
params
=
{
'data_dir'
:
'/tmp/tensorflow/mnist/input_data'
,
'channel_1_num'
:
32
,
'channel_2_num'
:
64
,
'pool_size'
:
2
}
return
params
if
__name__
==
'__main__'
:
try
:
nni
.
get_next_parameter
()
main
(
generate_defualt_params
())
except
Exception
as
exception
:
logger
.
exception
(
exception
)
raise
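Pulled together, the annotations in this file implicitly define the search space that the TPE tuner explored. A rough rendering in NNI's `_type`/`_value` search-space convention, written as a plain Python dict; the keys for the unnamed `nni.choice` and `nni.function_choice` calls are illustrative only, since NNI auto-generates names for unnamed annotations:

```python
# Sketch of the search space implied by the annotations in mnist.py above.
# 'hidden_size' and 'conv1_activation' are hypothetical key names; the rest
# come from the name= arguments in the annotations.
search_space = {
    'conv-size':        {'_type': 'choice', '_value': [2, 3, 5, 7]},
    'hidden_size':      {'_type': 'choice', '_value': [124, 512, 1024]},
    'learning_rate':    {'_type': 'uniform', '_value': [0.0001, 0.1]},
    'conv1_activation': {'_type': 'choice', '_value': ['relu', 'sigmoid', 'tanh']},
    'h_pool1':          {'_type': 'choice', '_value': ['max_pool', 'avg_pool']},
    'batch_size':       {'_type': 'choice', '_value': [1, 4, 8, 16, 32]},
    'dropout_rate':     {'_type': 'choice', '_value': [0.5, 0.9]},
}
```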