#!/usr/bin/python
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""A deep MNIST classifier using convolutional layers."""

import logging
import math
import tempfile

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import nni

FLAGS = None

logger = logging.getLogger('mnist_AutoML')


class MnistNetwork(object):
    '''
    MnistNetwork initializes and builds the basic network for MNIST.
    '''

    def __init__(self,
                 channel_1_num,
                 channel_2_num,
                 pool_size,
                 learning_rate=0.0001,  # placeholder default; superseded by nni.uniform below
                 x_dim=784,
                 y_dim=10):
        self.channel_1_num = channel_1_num
        self.channel_2_num = channel_2_num
        self.conv_size = nni.choice(2, 3, 5, 7, name='conv-size')
        self.hidden_size = nni.choice(124, 512, 1024)  # example: without name
        self.pool_size = pool_size
        self.learning_rate = nni.uniform(0.0001, 0.1, name='learning_rate')
        self.x_dim = x_dim
        self.y_dim = y_dim

        self.images = tf.placeholder(tf.float32, [None, self.x_dim], name='input_x')
        self.labels = tf.placeholder(tf.float32, [None, self.y_dim], name='input_y')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')

        self.train_step = None
        self.accuracy = None

    def build_network(self):
        '''
        Build the network for MNIST.
        '''

        # Reshape to use within a convolutional neural net.
        # Last dimension is for "features" - there is only one here, since images are
        # grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
        with tf.name_scope('reshape'):
            try:
                input_dim = int(math.sqrt(self.x_dim))
            except Exception:
                print('input dim cannot be square-rooted for reshape. input dim: '
                      + str(self.x_dim))
                logger.debug('input dim cannot be square-rooted for reshape. input dim: %s',
                             str(self.x_dim))
                raise
            x_image = tf.reshape(self.images, [-1, input_dim, input_dim, 1])

        # First convolutional layer - maps one grayscale image to 32 feature maps.
        with tf.name_scope('conv1'):
            w_conv1 = weight_variable(
                [self.conv_size, self.conv_size, 1, self.channel_1_num])
            b_conv1 = bias_variable([self.channel_1_num])
            h_conv1 = nni.function_choice(
                lambda: tf.nn.relu(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.sigmoid(conv2d(x_image, w_conv1) + b_conv1),
                lambda: tf.nn.tanh(conv2d(x_image, w_conv1) + b_conv1))  # example: without name

        # Pooling layer - downsamples by 2X.
        with tf.name_scope('pool1'):
            h_pool1 = nni.function_choice(
                lambda: max_pool(h_conv1, self.pool_size),
                lambda: avg_pool(h_conv1, self.pool_size),
                name='h_pool1')
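        # A brief note on the smart-parameter API used above (a hedged summary
        # of NNI's smart parameter feature, not part of the original example):
        # nni.choice and nni.uniform yield one tuner-chosen value per trial,
        # while nni.function_choice additionally invokes the chosen candidate
        # -- here deciding relu/sigmoid/tanh for conv1 and max vs. average
        # pooling for pool1. Calls without an explicit name= appear to rely on
        # NNI inferring an identifier from the source location.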
        # Second convolutional layer -- maps 32 feature maps to 64.
        with tf.name_scope('conv2'):
            w_conv2 = weight_variable([self.conv_size, self.conv_size,
                                       self.channel_1_num, self.channel_2_num])
            b_conv2 = bias_variable([self.channel_2_num])
            h_conv2 = tf.nn.relu(conv2d(h_pool1, w_conv2) + b_conv2)

        # Second pooling layer.
        with tf.name_scope('pool2'):
            # example: another style
            h_pool2 = max_pool(h_conv2, self.pool_size)

        # Fully connected layer 1 -- after 2 rounds of downsampling, our 28x28 image
        # is down to 7x7x64 feature maps -- map this to 1024 features.
        last_dim = int(input_dim / (self.pool_size * self.pool_size))
        with tf.name_scope('fc1'):
            w_fc1 = weight_variable(
                [last_dim * last_dim * self.channel_2_num, self.hidden_size])
            b_fc1 = bias_variable([self.hidden_size])
            h_pool2_flat = tf.reshape(
                h_pool2, [-1, last_dim * last_dim * self.channel_2_num])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, w_fc1) + b_fc1)

        # Dropout - controls the complexity of the model, prevents co-adaptation of features.
        with tf.name_scope('dropout'):
            h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob)

        # Map the 1024 features to 10 classes, one for each digit.
        with tf.name_scope('fc2'):
            w_fc2 = weight_variable([self.hidden_size, self.y_dim])
            b_fc2 = bias_variable([self.y_dim])
            y_conv = tf.matmul(h_fc1_drop, w_fc2) + b_fc2

        with tf.name_scope('loss'):
            cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=y_conv))

        with tf.name_scope('adam_optimizer'):
            self.train_step = tf.train.AdamOptimizer(
                self.learning_rate).minimize(cross_entropy)

        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(
                tf.argmax(y_conv, 1), tf.argmax(self.labels, 1))
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))


def conv2d(x_input, w_matrix):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x_input, w_matrix, strides=[1, 1, 1, 1], padding='SAME')


def max_pool(x_input, pool_size):
    """max_pool downsamples a feature map by 2X."""
    return tf.nn.max_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def avg_pool(x_input, pool_size):
    """avg_pool downsamples a feature map by 2X using average pooling."""
    return tf.nn.avg_pool(x_input, ksize=[1, pool_size, pool_size, 1],
                          strides=[1, pool_size, pool_size, 1], padding='SAME')


def weight_variable(shape):
    """weight_variable generates a weight variable of a given shape."""
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    """bias_variable generates a bias variable of a given shape."""
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
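# For reference, the smart parameters declared in this file roughly imply a
# search space of the following shape. This is an illustrative sketch only,
# not the exact JSON NNI generates (function_choice entries in particular are
# represented differently, and names for unnamed parameters are inferred):
#
#   {
#       "conv-size":     {"_type": "choice",  "_value": [2, 3, 5, 7]},
#       "learning_rate": {"_type": "uniform", "_value": [0.0001, 0.1]},
#       "batch_num":     {"_type": "choice",  "_value": [50, 250, 500]},
#       "dropout_rate":  {"_type": "choice",  "_value": [0.5, 0.9]}
#   }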
def main(params):
    '''
    Main function: build the MNIST network, run training, and send results to NNI.
    '''
    # Import data
    mnist = input_data.read_data_sets(params['data_dir'], one_hot=True)
    print('MNIST data download done.')
    logger.debug('MNIST data download done.')

    # Create the model
    # Build the graph for the deep net
    mnist_network = MnistNetwork(channel_1_num=params['channel_1_num'],
                                 channel_2_num=params['channel_2_num'],
                                 pool_size=params['pool_size'])
    mnist_network.build_network()
    logger.debug('MNIST network build done.')

    # Write log
    graph_location = tempfile.mkdtemp()
    logger.debug('Saving graph to: %s', graph_location)
    train_writer = tf.summary.FileWriter(graph_location)
    train_writer.add_graph(tf.get_default_graph())

    test_acc = 0.0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Note: the tuned batch_num doubles as both the iteration count and
        # the batch size below.
        batch_num = nni.choice(50, 250, 500, name='batch_num')
        for i in range(batch_num):
            batch = mnist.train.next_batch(batch_num)
            # keep_prob must lie in (0, 1]; candidates chosen accordingly.
            dropout_rate = nni.choice(0.5, 0.9, name='dropout_rate')
            mnist_network.train_step.run(feed_dict={mnist_network.images: batch[0],
                                                    mnist_network.labels: batch[1],
                                                    mnist_network.keep_prob: dropout_rate})

            if i % 100 == 0:
                test_acc = mnist_network.accuracy.eval(
                    feed_dict={mnist_network.images: mnist.test.images,
                               mnist_network.labels: mnist.test.labels,
                               mnist_network.keep_prob: 1.0})
                nni.report_intermediate_result(test_acc)
                logger.debug('test accuracy %g', test_acc)
                logger.debug('Pipe send intermediate result done.')

        test_acc = mnist_network.accuracy.eval(
            feed_dict={mnist_network.images: mnist.test.images,
                       mnist_network.labels: mnist.test.labels,
                       mnist_network.keep_prob: 1.0})
        nni.report_final_result(test_acc)
        logger.debug('Final result is %g', test_acc)
        logger.debug('Send final result done.')


def generate_default_params():
    '''
    Generate default parameters for the MNIST network.
    '''
    params = {
        'data_dir': '/tmp/tensorflow/mnist/input_data',
        'channel_1_num': 32,
        'channel_2_num': 64,
        'pool_size': 2}
    return params


if __name__ == '__main__':
    try:
        main(generate_default_params())
    except Exception as exception:
        logger.exception(exception)
        raise
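# Usage note: this script is intended to run as a trial under an NNI
# experiment, typically launched with something like
#
#   nnictl create --config config.yml
#
# where the (hypothetical) config.yml enables smart parameters for this
# trial. Outside an experiment the nni.* calls have no tuner to query, so
# standalone behavior depends on the installed NNI version.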