"test/vscode:/vscode.git/clone" did not exist on "3f6a8274a97bf003b5eadc05faa324162b7f4123"
Commit a2b2c56d authored by Deshui Yu's avatar Deshui Yu Committed by fishyds
Browse files

Remove unused files

parent 0cea39c5
Installation instructions
===
## install using deb file
TBD
## install from source code
* Prepare Node.js 10.9.0 or above
wget https://nodejs.org/dist/v10.9.0/node-v10.9.0-linux-x64.tar.xz
tar xf node-v10.9.0-linux-x64.tar.xz
mv node-v10.9.0-linux-x64/* /usr/local/node/
* Prepare Yarn 1.9.4 or above
wget https://github.com/yarnpkg/yarn/releases/download/v1.9.4/yarn-v1.9.4.tar.gz
tar xf yarn-v1.9.4.tar.gz
mv yarn-v1.9.4/* /usr/local/yarn/
* Add Node.js and Yarn in PATH
export PATH=/usr/local/node/bin:/usr/local/yarn/bin:$PATH
* clone nni source code
git clone https://github.com/Microsoft/NeuralNetworkIntelligence
* build and install nni
make build
sudo make install
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Builds the CIFAR-10 network.
Summary of available functions:
# Compute input images and labels for training. If you would like to run
# evaluations, use inputs() instead.
inputs, labels = distorted_inputs()
# Compute inference on the model inputs to make a prediction.
predictions = inference(inputs)
# Compute the total loss of the prediction with respect to the labels.
loss = loss(predictions, labels)
# Create a graph to run one step of training with respect to the loss.
train_op = train(loss, global_step)
"""
import argparse
import os
import re
import tarfile
import urllib
import urllib.request

import tensorflow as tf

import cifar10_input
# Command-line flags, parsed once at import time.
parser = argparse.ArgumentParser()
# Basic model parameters.
parser.add_argument('--batch_size', type=int, default=512,
                    help='Number of images to process in a batch.')
parser.add_argument('--data_dir', type=str, default='/tmp/cifar10_data',
                    help='Path to the CIFAR-10 data directory.')
# NOTE(review): type=bool is a footgun — argparse calls bool() on the raw
# string, so `--use_fp16 False` still yields True. Consider action='store_true'.
parser.add_argument('--use_fp16', type=bool, default=False,
                    help='Train the model using fp16.')
# NOTE(review): parsing at import time consumes sys.argv of any importer;
# kept as-is because module-level code below reads FLAGS.
FLAGS = parser.parse_args()
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
NUM_CLASSES = cifar10_input.NUM_CLASSES
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = cifar10_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999  # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0  # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.1  # Initial learning rate.
# If a model is trained with multiple GPUs, prefix all Op names with tower_name
# to differentiate the operations. Note that this prefix is removed from the
# names of the summaries when visualizing a model.
TOWER_NAME = 'tower'
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
def _activation_summary(x_input):
    """Attach TensorBoard summaries (histogram + sparsity) to a tensor.

    Args:
        x_input: Tensor whose activations should be summarized.

    Returns:
        None.
    """
    # Strip any 'tower_<n>/' prefix so that summaries from different GPU
    # towers share a name and TensorBoard stays readable.
    clean_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x_input.op.name)
    tf.summary.histogram('%s/activations' % clean_name, x_input)
    tf.summary.scalar('%s/sparsity' % clean_name,
                      tf.nn.zero_fraction(x_input))
def _variable_on_cpu(name, shape, initializer):
    """Create (or fetch) a variable pinned to host memory.

    Args:
        name: name of the variable.
        shape: list of ints giving the variable's shape.
        initializer: initializer applied when the variable is first created.

    Returns:
        The variable tensor.
    """
    # Half precision only when the flag asks for it.
    storage_dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    # Variables live on the CPU so multiple GPU towers can share them.
    with tf.device('/cpu:0'):
        return tf.get_variable(name, shape, initializer=initializer,
                               dtype=storage_dtype)
def _variable_with_weight_decay(name, shape, stddev, l2loss_wd):
    """Create a truncated-normal-initialized variable, optionally with L2 decay.

    The variable is placed on the CPU via _variable_on_cpu. When l2loss_wd is
    given, an L2 penalty scaled by it is registered in the 'losses' collection.

    Args:
        name: name of the variable.
        shape: list of ints.
        stddev: standard deviation of the truncated Gaussian initializer.
        l2loss_wd: L2 weight-decay multiplier, or None to skip weight decay.

    Returns:
        The variable tensor.
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    init = tf.truncated_normal_initializer(stddev=stddev, dtype=dtype)
    var = _variable_on_cpu(name, shape, init)
    if l2loss_wd is None:
        return var
    # Register the weight-decay term so the training loss can pick it up.
    penalty = tf.multiply(tf.nn.l2_loss(var), l2loss_wd, name='weight_loss')
    tf.add_to_collection('losses', penalty)
    return var
def distorted_inputs():
    """Construct distorted input for CIFAR training using the Reader ops.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

    Raises:
        ValueError: If no data_dir
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    # NOTE(review): this overrides --data_dir with the working directory,
    # which makes the flag (and the check above) ineffective — confirm this
    # is intended; maybe_download_and_extract() does the same.
    FLAGS.data_dir = './'
    print(FLAGS.data_dir)
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                    batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        # Cast the pipeline to half precision when requested.
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def inputs(eval_data):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        eval_data: bool, indicating if one should use the train or eval data set.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

    Raises:
        ValueError: If no data_dir
    """
    # NOTE(review): hard-coding data_dir here makes the emptiness check
    # below dead code and disables the --data_dir flag — confirm intended.
    FLAGS.data_dir = './'
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    # NOTE(review): despite the docstring saying bool, the comparison is
    # against None — any non-None value (e.g. the string 'test' used by the
    # trial script) selects the evaluation pipeline below.
    if eval_data is None:
        images, labels = cifar10_input.distorted_inputs(data_dir=data_dir,
                                                        batch_size=FLAGS.batch_size)
    else:
        images, labels = cifar10_input.inputs(eval_data=eval_data,
                                              data_dir=data_dir,
                                              batch_size=FLAGS.batch_size)
    if FLAGS.use_fp16:
        # Cast the pipeline to half precision when requested.
        images = tf.cast(images, tf.float16)
        labels = tf.cast(labels, tf.float16)
    return images, labels
def maybe_download_and_extract():
    """Download the CIFAR-10 binary tarball if absent, then extract it.

    Downloads DATA_URL into the data directory (skipped when the archive is
    already present) and extracts it into 'cifar-10-batches-bin' (skipped when
    that directory already exists).

    Returns:
        None. Side effects: creates files/directories under the data dir.
    """
    # NOTE(review): the data directory is hard-coded to the working directory,
    # overriding --data_dir; kept for consistency with distorted_inputs().
    FLAGS.data_dir = './'
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Simple textual progress callback for urlretrieve.
            percent = float(count * block_size) / float(total_size) * 100.0
            print('\r>> Downloading %s %.1f%%' % (filename, percent))
        # Requires `import urllib.request`; bare `import urllib` does not
        # bind the submodule in Python 3.
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        # Use a context manager so the tar handle is closed deterministically
        # (it was previously leaked).
        with tarfile.open(filepath, 'r:gz') as tar:
            tar.extractall(dest_directory)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Routine for decoding the CIFAR-10 binary file format."""
import os
import types
import tensorflow as tf
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
IMAGE_SIZE = 24
# Global constants describing the CIFAR-10 data set.
# 10 classes; 50k training / 10k evaluation examples per epoch.
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
def read_cifar10(filename_queue):
    """
    Reads and parses examples from CIFAR10 data files.
    Recommendation: if you want N-way read parallelism, call this function N times.
    This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.
    Args:
        filename_queue: A queue of strings with the filenames to read from.
    Returns:
        An object representing a single example, with the following fields:
            height: number of rows in the result (32)
            width: number of columns in the result (32)
            depth: number of color channels in the result (3)
            key: a scalar string Tensor describing the filename & record number
                for this example.
            label: an int32 Tensor with the label in the range 0..9.
            uint8image: a [height, width, depth] uint8 Tensor with the image data
    """
    # A plain namespace is enough to bundle the parsed-example fields.
    result = types.SimpleNamespace()
    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes
    # Read a record, getting filenames from the filename_queue. No
    # header or footer in the CIFAR-10 format, so we leave header_bytes
    # and footer_bytes at their default of 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    # Convert from a string to a vector of uint8 that is record_bytes long.
    # NOTE: record_bytes is rebound here from a Python int to a Tensor.
    record_bytes = tf.decode_raw(value, tf.uint8)
    # The first bytes represent the label, which we convert from uint8->int32.
    result.label = tf.cast(tf.strided_slice(
        record_bytes, [0], [label_bytes]), tf.int32)
    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(tf.strided_slice(record_bytes,
                                              [label_bytes],
                                              [label_bytes + image_bytes]),
                             [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """
    Assemble single examples into queued batches.

    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.

    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # 16 reader threads keep the example queue full.
    num_threads = 16
    capacity = min_queue_examples + 3 * batch_size
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=capacity,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_threads,
            capacity=capacity)
    # Flatten labels from [batch_size, 1] to [batch_size].
    labels = tf.reshape(label_batch, [batch_size])
    return images, labels
def distorted_inputs(data_dir, batch_size):
    """Build the randomly-distorted training input pipeline for CIFAR-10.

    Args:
        data_dir: Path to the CIFAR-10 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    batch_files = [os.path.join(data_dir, 'data_batch_%d.bin' % idx)
                   for idx in range(1, 6)]
    for path in batch_files:
        if not tf.gfile.Exists(path):
            raise ValueError('Failed to find file: ' + path)
    # Queue that cycles through the input file names.
    filename_queue = tf.train.string_input_producer(batch_files)
    # Parse one record and convert the raw uint8 image to float32.
    record = read_cifar10(filename_queue)
    image = tf.cast(record.uint8image, tf.float32)
    crop_h = IMAGE_SIZE
    crop_w = IMAGE_SIZE
    # --- random distortions applied only for training ---
    # Random crop down to the network's input size.
    distorted = tf.random_crop(image, [crop_h, crop_w, 3])
    # Random horizontal flip.
    distorted = tf.image.random_flip_left_right(distorted)
    # Brightness/contrast jitter. These two ops do not commute, so their
    # order matters; also per_image_standardization below likely cancels the
    # brightness shift (see tensorflow#1458).
    distorted = tf.image.random_brightness(distorted, max_delta=63)
    distorted = tf.image.random_contrast(distorted, lower=0.2, upper=1.8)
    # Normalize to zero mean and unit variance.
    normalized = tf.image.per_image_standardization(distorted)
    # Pin static shapes so downstream batching knows them.
    normalized.set_shape([crop_h, crop_w, 3])
    record.label.set_shape([1])
    # Keep 40% of an epoch queued so shuffling mixes examples well.
    min_fraction_of_examples = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples)
    return _generate_image_and_label_batch(normalized, record.label,
                                           min_queue_examples, batch_size,
                                           shuffle=True)
def inputs(eval_data, data_dir, batch_size):
    """Build the undistorted input pipeline for CIFAR-10 evaluation.

    Args:
        eval_data: bool, indicating if one should use the train or eval data set.
        data_dir: Path to the CIFAR-10 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Pick the file list and epoch size for the requested split.
    if eval_data:
        data_files = [os.path.join(data_dir, 'test_batch.bin')]
        examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    else:
        data_files = [os.path.join(data_dir, 'data_batch_%d.bin' % idx)
                      for idx in range(1, 6)]
        examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    for path in data_files:
        if not tf.gfile.Exists(path):
            raise ValueError('Failed to find file: ' + path)
    # Queue that cycles through the input file names.
    filename_queue = tf.train.string_input_producer(data_files)
    # Parse one record and convert the raw uint8 image to float32.
    record = read_cifar10(filename_queue)
    image = tf.cast(record.uint8image, tf.float32)
    # Deterministic central crop (or pad) to the network's input size —
    # no random distortions for evaluation.
    resized = tf.image.resize_image_with_crop_or_pad(
        image, IMAGE_SIZE, IMAGE_SIZE)
    # Normalize to zero mean and unit variance.
    normalized = tf.image.per_image_standardization(resized)
    # Pin static shapes so downstream batching knows them.
    normalized.set_shape([IMAGE_SIZE, IMAGE_SIZE, 3])
    record.label.set_shape([1])
    # Keep 40% of an epoch queued for good mixing properties.
    min_fraction_of_examples = 0.4
    min_queue_examples = int(examples_per_epoch *
                             min_fraction_of_examples)
    return _generate_image_and_label_batch(normalized, record.label,
                                           min_queue_examples, batch_size,
                                           shuffle=False)
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
Building the cifar10 network, run and send result to NNI.
'''
import logging
import tensorflow as tf
import nni
import cifar10
# Module-level logger for this trial script.
_logger = logging.getLogger("cifar10_automl")
# Number of CIFAR-10 classes.
NUM_CLASS = 10
# Total number of training batches per trial (lower it for smoke tests).
MAX_BATCH_NUM = 5000
#MAX_BATCH_NUM = 50
def activation_functions(act):
    '''
    Map an activation-function index to the corresponding tf.nn callable.

    Indices 3 and 4 both map to relu (mirroring the original chain of ifs);
    any unknown index yields None.
    '''
    index_to_fn = {
        1: tf.nn.softmax,
        2: tf.nn.tanh,
        3: tf.nn.relu,
        4: tf.nn.relu,
        5: tf.nn.elu,
        6: tf.nn.leaky_relu,
    }
    return index_to_fn.get(act)
def get_optimizer(opt):
    '''
    Map an optimizer index (1-5) to the corresponding tf.train optimizer class.

    Args:
        opt: integer index chosen by the search space.

    Returns:
        The optimizer class (not an instance).

    Raises:
        ValueError: if opt is not a known index. The original code used a bare
        `assert False`, which is silently stripped under `python -O` and would
        have let the function fall through to `return None`.
    '''
    index_to_optimizer = {
        1: tf.train.GradientDescentOptimizer,
        2: tf.train.RMSPropOptimizer,
        3: tf.train.AdagradOptimizer,
        4: tf.train.AdadeltaOptimizer,
        5: tf.train.AdamOptimizer,
    }
    try:
        return index_to_optimizer[opt]
    except KeyError:
        raise ValueError('Unsupported optimizer index: %r' % opt)
class Cifar10(object):
    '''
    Class Cifar10 could build and run network for cifar10.
    '''

    def __init__(self):
        # Place holder
        # is_train: feed 1 to use the training pipeline, 0 for the test one.
        self.is_train = tf.placeholder('int32')
        # Dropout keep probabilities for the conv blocks / dense layer.
        self.keep_prob1 = tf.placeholder('float', name='xa')
        self.keep_prob2 = tf.placeholder('float', name='xb')
        # Populated by build_network().
        self.accuracy = None
        self.train_op = None

    def build_network(self, config):
        """
        Build network for CIFAR-10 and train.

        Reads the hyper-parameters from `config` (see get_default_params for
        the expected keys), assembles the conv/dense graph, and stores the
        accuracy and train ops on self.
        """
        num_classes = NUM_CLASS
        batch_size = config['batch_size']
        num_units = config['conv_units_size']
        conv_size = config['conv_size']
        num_blocks = config['num_blocks']
        initial_method = config['initial_method']
        act_notlast = config['act_notlast']
        pool_size = config['pool_size']
        hidden_size = config['hidden_size']
        act = config['act']
        learning_rate = config['learning_rate']
        opt = get_optimizer(config['optimizer'])
        is_train = self.is_train
        keep_prob1 = self.keep_prob1
        keep_prob2 = self.keep_prob2
        # Get images and labels for CIFAR-10.
        with tf.device('/cpu:0'):
            images, labels = cifar10.distorted_inputs()
            # NOTE(review): cifar10.inputs compares eval_data against None, so
            # the string 'test' selects the evaluation pipeline here.
            test_images, test_labels = cifar10.inputs('test')
        # Choose test set or train set by is_train
        images = images * tf.cast(is_train, tf.float32) + \
            (1 - tf.cast(is_train, tf.float32)) * test_images
        labels = labels * is_train + (1 - is_train) * test_labels
        # Take exactly one batch of 24x24x3 images and one-hot the labels.
        input_vec = tf.slice(images, [0, 0, 0, 0], [batch_size, 24, 24, 3])
        output = tf.slice(labels, [0], [batch_size])
        output = tf.one_hot(output, num_classes)
        input_units = 3
        for num in range(num_blocks):
            # Conv kernel init: 1 = truncated normal, otherwise uniform.
            if initial_method == 1:
                conv_layer = tf.Variable(tf.truncated_normal(shape=[conv_size, conv_size,
                                                                    input_units, num_units],
                                                             stddev=1.0 / num_units))
            else:
                conv_layer = tf.Variable(tf.random_uniform(shape=[conv_size, conv_size,
                                                                  input_units, num_units],
                                                           minval=-0.05, maxval=0.05))
            input_units = num_units
            input_vec = tf.nn.conv2d(input_vec, conv_layer, strides=[1, 1, 1, 1], padding='SAME')
            act_no_f = activation_functions(act_notlast)
            input_vec = act_no_f(input_vec)
            input_vec = tf.layers.batch_normalization(input_vec)
            input_vec = tf.nn.dropout(input_vec, keep_prob=keep_prob1)
            # Only the last two blocks downsample and widen the channels.
            if num >= num_blocks - 2:
                input_vec = tf.nn.max_pool(input_vec, ksize=[1, pool_size, pool_size, 1],
                                           strides=[1, 2, 2, 1], padding='SAME')
                num_units = num_units * 2
        # Classification head: dense -> BN -> dropout -> logits.
        input_vec = tf.contrib.layers.flatten(input_vec)
        input_vec = tf.layers.dense(
            input_vec, hidden_size, activation=activation_functions(act))
        input_vec = tf.layers.batch_normalization(input_vec)
        input_vec = tf.nn.dropout(input_vec, keep_prob=keep_prob2)
        input_vec = tf.layers.dense(input_vec, num_classes)
        logit = tf.nn.softmax_cross_entropy_with_logits(
            logits=input_vec, labels=output)
        loss = tf.reduce_mean(logit)
        accuracy = tf.equal(tf.argmax(input_vec, 1), tf.argmax(output, 1))
        self.accuracy = tf.reduce_mean(
            tf.cast(accuracy, "float"))  # add a reduce_mean
        self.train_op = opt(learning_rate=learning_rate).minimize(loss)

    def train(self, config):
        """
        train the cifar10 network

        Builds the graph, runs MAX_BATCH_NUM training steps, reports an
        intermediate accuracy to NNI every 2000 batches, and reports the last
        measured accuracy as the final result.
        """
        _logger.debug('Config is: %s', str(config))
        # NOTE(review): asserts vanish under `python -O`; they also reject
        # legitimate falsy values (e.g. dropout == 0).
        assert config['batch_size']
        assert config['conv_units_size']
        assert config['conv_size']
        assert config['num_blocks']
        assert config['initial_method']
        assert config['act_notlast']
        assert config['pool_size']
        assert config['hidden_size']
        assert config['act']
        assert config['dropout']
        assert config['learning_rate']
        assert config['optimizer']
        self.build_network(config)
        with tf.Session() as sess:
            # Initialize variables
            # NOTE(review): tf.initialize_all_variables is deprecated in favor
            # of tf.global_variables_initializer — confirm the TF version.
            tf.initialize_all_variables().run()
            _logger.debug('Initialize all variables done.')
            # Queue runners feed the input pipelines built in build_network.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)
            cnt = 0
            for cnt in range(MAX_BATCH_NUM):
                # Shift to a 1-based batch counter for the modulo check below.
                cnt = cnt + 1
                if cnt % 2000 == 0:
                    _logger.debug('Runing in batch %s', str(cnt))
                    # Evaluate on the test pipeline (is_train=0, no dropout).
                    acc = sess.run(self.accuracy, feed_dict={self.is_train: 0,
                                                             self.keep_prob1: 1.0,
                                                             self.keep_prob2: 1.0})
                    # Send intermediate result
                    nni.report_intermediate_result(acc)
                    _logger.debug('Report intermediate result done.')
                sess.run(self.train_op, feed_dict={self.is_train: 1,
                                                   self.keep_prob1: 1 - config['dropout'],
                                                   self.keep_prob2: config['dropout']})
            coord.request_stop()
            coord.join(threads)
            # Send final result
            # NOTE(review): `acc` is only bound inside the `cnt % 2000 == 0`
            # branch; with MAX_BATCH_NUM < 2000 this raises NameError.
            nni.report_final_result(acc)
            _logger.debug('Training cifar10 done.')
def get_default_params():
    '''
    Return the default hyper-parameter configuration as a dict.

    These values are used as the baseline and then overlaid with whatever
    the tuner proposes.
    '''
    return {
        'learning_rate': 0.1,
        'batch_size': 512,
        'num_epochs': 100,
        'dropout': 0.5,
        'hidden_size': 1682,
        'conv_size': 5,
        'num_blocks': 3,
        'conv_units_size': 32,
        'pool_size': 3,
        'act_notlast': 5,
        'act': 2,
        'optimizer': 5,
        'initial_method': 2,
    }
if __name__ == '__main__':
    try:
        # Hyper-parameters proposed by the NNI tuner for this trial.
        RCV_CONFIG = nni.get_parameters()
        _logger.debug(RCV_CONFIG)
        # Make sure the CIFAR-10 binaries are available locally.
        cifar10.maybe_download_and_extract()
        train_cifar10 = Cifar10()
        # Start from the defaults, then overlay the tuner's values.
        params = get_default_params()
        params.update(RCV_CONFIG)
        train_cifar10.train(params)
    except Exception as exception:
        # Log the full traceback, then re-raise so the trial fails visibly.
        _logger.exception(exception)
        raise
authorName: default
experimentName: example_cifar10
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 1
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: /usr/share/nni/examples/trials/cifar10/search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution
tunerName: TPE
#choice: Maximize, Minimize
optimizationMode: Maximize
trial:
trialCommand: python3 cifar10.py
trialCodeDir: /usr/share/nni/examples/trials/cifar10
trialGpuNum: 0
\ No newline at end of file
authorName: default
experimentName: example_cifar10
trialConcurrency: 1
maxExecDuration: 1h
maxTrialNum: 1
#choice: local, remote
trainingServicePlatform: local
searchSpacePath: /usr/share/nni/examples/trials/cifar10/search_space.json
#choice: true, false
useAnnotation: false
tuner:
#choice: TPE, Random, Anneal, Evolution
tunerName: TPE
#choice: Maximize, Minimize
optimizationMode: Maximize
assessor:
#choice: Medianstop
assessorName: Medianstop
#choice: Maximize, Minimize
optimizationMode: Maximize
trial:
trialCommand: python3 cifar10.py
trialCodeDir: /usr/share/nni/examples/trials/cifar10
trialGpuNum: 0
\ No newline at end of file
{
"dropout":{"_type":"uniform","_value":[0, 1]},
"dropout_notlast":{"_type":"uniform","_value":[0, 1]},
"learning_rate":{"_type":"uniform", "_value":[0.0001, 1]},
"batch_size":{"_type":"choice", "_value":[50, 100, 200, 300, 400, 500]},
"hidden_size":{"_type":"choice", "_value":[100, 200, 500, 1000, 2000]},
"conv_size":{"_type":"choice", "_value":[1, 3, 5, 7]},
"conv_units_size":{"_type":"choice", "_value":[16, 32, 64]},
"num_blocks":{"_type":"choice", "_value":[1, 2, 3, 4, 5, 6, 7]},
"act_notlast":{"_type":"choice", "_value":[1, 2, 3, 4, 5, 6]},
"act":{"_type":"choice", "_value":[1, 2, 3, 4, 5, 6]},
"optimizer":{"_type":"choice", "_value": [1, 2, 3, 4, 5]},
"initial_method":{"_type":"choice", "_value":[1, 2]}
}
# Customized Tuner for Experts
*A Tuner receives results from Trials as a metric to evaluate the performance of a specific parameter/architecture configuration. The Tuner then sends the next hyper-parameter or architecture configuration to a Trial.*
So, if a user wants to implement a customized Tuner, he/she only needs to:
**1) Inherit a tuner of a base Tuner class**
```python
from nni.tuner import Tuner
class CustomizedTuner(Tuner):
def __init__(self, ...):
...
```
**2) Implement receive trial result function**
```python
from nni.tuner import Tuner
class CustomizedTuner(Tuner):
def __init__(self, ...):
...
def receive_trial_result(self, parameter_id, parameters, reward):
'''
Record an observation of the objective function
'''
# your code implements here.
...
```
**3) Implement generate parameter function**
```python
from nni.tuner import Tuner
class CustomizedTuner(Tuner):
def __init__(self, ...):
...
def receive_trial_result(self, parameter_id, parameters, reward):
'''
Record an observation of the objective function
parameter_id: int
parameters: object created by 'generate_parameters()'
reward: object reported by trial
'''
# your code implements here.
...
def generate_parameters(self, parameter_id):
'''
Returns a set of trial (hyper-)parameters, as a serializable object
parameter_id: int
'''
# your code implements here.
...
```
**4) Write a script to run Tuner**
```python
import argparse
import CustomizedTuner
def main():
parser = argparse.ArgumentParser(description='parse command line parameters.')
# parse your tuner arg here.
...
FLAGS, unparsed = parser.parse_known_args()
tuner = CustomizedTuner(...)
tuner.run()
main()
```
Please note in **2)** and **3)**: the parameter configuration returned by the ```generate_parameters``` function will be packaged as a json object by the nni SDK. The nni SDK will then unpack the json object, so the Trial receives the exact same configuration from the Tuner.
User could override the ```run``` function in ```CustomizedTuner``` class, which could help user to control the process logic in Tuner, such as control handle request from Trial.
```receive_trial_result``` will receive ```the parameter_id, parameters, reward``` as parameters input. Also, the ```reward``` object the Tuner receives is exactly the same reward that the Trial sends.
More detail example you could see:
> * [evolution-tuner](https://msrasrg.visualstudio.com/NeuralNetworkIntelligenceOpenSource/_git/Default?path=%2Fsrc%2Fsdk%2Fpynni%2Fnni%2Fevolution_tuner&version=GBmaster)
> * [hyperopt-tuner](https://msrasrg.visualstudio.com/NeuralNetworkIntelligenceOpenSource/_git/Default?path=%2Fsrc%2Fsdk%2Fpynni%2Fnni%2Fhyperopt_tuner&version=GBmaster)
> * [ga-customer-tuner](https://msrasrg.visualstudio.com/NeuralNetworkIntelligenceOpenSource/_git/Default?path=%2Fexamples%2Ftuners%2Fga_customer_tuner&version=GBmaster)
\ No newline at end of file
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
from customer_tuner import CustomerTuner, OptimizeMode
# Module logger for the GA customer tuner entry script.
logger = logging.getLogger('nni.ga_customer_tuner')
logger.debug('START')
def main():
    """Parse the optimize-mode flag and launch the GA customer tuner."""
    arg_parser = argparse.ArgumentParser(description='parse command line parameters.')
    arg_parser.add_argument('--optimize_mode', type=str, default='maximize',
                            help='Select optimize mode for Tuner: minimize or maximize.')
    # parse_known_args ignores any extra NNI-injected arguments.
    flags, _ = arg_parser.parse_known_args()
    valid_modes = [mode.value for mode in OptimizeMode]
    if flags.optimize_mode not in valid_modes:
        raise AttributeError('Unsupported optimize mode "%s"' % flags.optimize_mode)
    CustomerTuner(flags.optimize_mode).run()
# Script entry point: run the tuner and log any fatal error before exiting.
try:
    main()
except Exception as e:
    # Log the full traceback, then re-raise so the process exits non-zero.
    logger.exception(e)
    raise
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from nni.tuner import Tuner
class TestTuner(Tuner):
    """Minimal tuner used to smoke-test the NNI tuner protocol."""

    def generate_parameters(self, trial_id):
        # Always propose the same fixed learning rate.
        return {'lr':0.01}

    def receive_trial_result(self, parameter_id, parameters, reward):
        # Trial results are ignored by this test tuner.
        pass

    def update_search_space(self, search_space):
        # Accept any search space.
        return True

# Start the tuner's message loop as soon as the module runs.
TestTuner().run()
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
from .darkopt_assessor import DarkoptAssessor, OptimizeMode
# Module-level logger for the Darkopt assessor entry point; the START line
# marks dispatcher startup in the log.
logger = logging.getLogger('nni.contribution.darkopt_assessor')
logger.debug('START')
def _main():
    """Parse command-line options and run the Darkopt assessor.

    Raises:
        AttributeError: if --optimize_mode is not a valid OptimizeMode value.
    """
    # run accessor for mnist:
    # python -m nni.contribution.darkopt_assessor --best_score=0.90 --period=200 --threshold=0.9 --optimize_mode=maximize
    parser = argparse.ArgumentParser(
        description='parse command line parameters.')
    parser.add_argument('--best_score', type=float,
                        help='Expected best score for Assessor.')
    parser.add_argument('--period', type=int,
                        help='Expected period for Assessor.')
    parser.add_argument('--threshold', type=float,
                        help='Threshold for Assessor.')
    parser.add_argument('--optimize_mode', type=str, default='maximize',
                        help='Select optimize mode for Assessor: minimize or maximize.')
    FLAGS, unparsed = parser.parse_known_args()
    if FLAGS.optimize_mode not in [mode.value for mode in OptimizeMode]:
        # Fixed typo in the error message: "optimzie" -> "optimize".
        raise AttributeError('Unsupported optimize mode "%s"' % FLAGS.optimize_mode)
    # Lazy %-style args avoid formatting the namespace when DEBUG is disabled.
    logger.debug('params: %s', FLAGS)
    assessor = DarkoptAssessor(FLAGS.best_score, FLAGS.period, FLAGS.threshold, OptimizeMode(FLAGS.optimize_mode))
    assessor.run()
# Script entry: log the failure with traceback, then re-raise so the process
# exits with a non-zero status (the original swallowed the exception, unlike
# every other __main__ wrapper in this package, masking failures from NNI).
try:
    _main()
except Exception as exception:
    logger.exception(exception)
    raise
\ No newline at end of file
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import pymc3
import scipy.stats
class EnsembleSamplingPredictor(object):
    """Ensemble of parametric learning curves, fit by posterior sampling."""

    def __init__(self):
        # Populated by fit(): the sequence of posterior sample points.
        self.traces = None

    def fit(self, x, y):
        """Sample the ensemble posterior for the observations (x, y)."""
        self.traces = sample_ensemble(np.asarray(x), np.asarray(y))

    def predict_proba_less_than(self, x, y):
        """Posterior-mean probability that the curve value at x is below y."""
        per_trace = [predict_proba_less_than_ensemble(x, y, trace)
                     for trace in self.traces]
        return np.mean(per_trace, axis=0)
# Parametric learning-curve families: (name, parameter count, model function).
# Each function maps (x, params) -> predicted score. The names double as
# pymc3 variable names, so they must stay in sync with sample_ensemble() and
# predict_proba_less_than_ensemble() below.
curves = [
    # NOTE(review): "vapore_pressure" is presumably a typo for
    # "vapor_pressure"; renaming it would change the pymc3 variable names
    # stored in traces, so it is deliberately left as-is here.
    ('vapore_pressure', 3, lambda x, p: p[0] * np.exp(p[1] / (1 + x) + p[2] * np.log1p(x))),
    ('weibull', 3, lambda x, p: p[0] - p[1] * np.exp(-p[2] * x)),
]
def _single(x, y, curve):
    """MAP-fit one curve family to (x, y); return the fitted parameter vector."""
    name, n_params, func = curve
    # Flat prior over curve parameters, tight uniform prior on the noise scale.
    with pymc3.Model():
        params = pymc3.Flat(name, shape=n_params)
        mu = func(x, params)
        sd = pymc3.Uniform('sd', lower=1e-9, upper=1e-1)
        pymc3.Normal('y_obs', mu=mu, sd=sd, observed=y)
        fitted = pymc3.find_MAP()
    return fitted[name]
def sample_ensemble(x, y):
    """Draw posterior samples for a weighted ensemble of all curve families.

    Each family is first MAP-fitted individually to seed the sampler, then a
    joint model with normalized mixture weights is sampled with Metropolis.
    Returns the pymc3 trace (1000 draws).
    """
    # Seed the sampler at the per-curve MAP estimates.
    start = { curve[0]: _single(x, y, curve) for curve in curves }
    # Seed values for the interval-transformed Uniform variables.
    # Fixed bug: the weights variable was created as 'weights_unnnormalized'
    # (three n's), so this start entry never matched its transformed name and
    # the seed was silently ignored; both sides now use 'weights_unnormalized'.
    start['weights_unnormalized_interval_'] = np.zeros(len(curves))
    start['sd_interval_'] = 0
    with pymc3.Model() as model_ensemble:
        mu_single = []
        for name, n_params, func in curves:
            params = pymc3.Flat(name, shape=n_params)
            mu_single.append(func(x, params))
        weights_unnormalized = pymc3.Uniform(
            'weights_unnormalized', lower=0, upper=1, shape=len(curves))
        weights_normalized = pymc3.Deterministic(
            'weights_normalized', weights_unnormalized / weights_unnormalized.sum())
        # Ensemble mean: mixture of the per-curve predictions.
        mu_ensemble = weights_normalized.dot(mu_single)
        sd1 = pymc3.Uniform('sd', lower=1e-9, upper=1e-1)
        # Expose the noise scale under the name read by
        # predict_proba_less_than_ensemble().
        pymc3.Deterministic('sd1', sd1)
        pymc3.Normal('y_obs', mu=mu_ensemble, observed=y, sd=sd1)
        return pymc3.sample(start=start, step=pymc3.Metropolis(), draws=1000)
def predict_proba_less_than_ensemble(x, y, param):
    """P(curve(x) < y) under a single posterior sample `param`."""
    per_curve = [func(x, param[name]) for name, _, func in curves]
    # Mixture mean: weight each family's prediction by the sampled weights.
    mean = param['weights_normalized'].dot(per_curve)
    return scipy.stats.norm.cdf(y, loc=mean, scale=param['sd1'])
\ No newline at end of file
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from nni.assessor import Assessor, AssessResult
from .darkopt2 import EnsembleSamplingPredictor
from enum import Enum
import logging
logger = logging.getLogger('nni.contribution.darkopt_assessor')
# Optimization direction for the assessor; member values match the strings
# accepted on the command line.
OptimizeMode = Enum('OptimizeMode', [('Maximize', 'maximize'), ('Minimize', 'minimize')])
class DarkoptAssessor(Assessor):
    """Assessor that early-stops trials unlikely to beat the best score.

    Fits an ensemble learning-curve model to a trial's intermediate results
    and stops the trial when the predicted probability of remaining below
    the best score at step `period` exceeds `threshold`.
    """

    def __init__(self, best_score, period, threshold, optimize_mode):
        # best_score: the score a trial must eventually beat; sign-flipped
        # for minimize so all internal comparisons maximize.
        self.best_score = best_score
        # period: the step at which the curve model is evaluated.
        self.period = period
        # threshold: probability above which a trial is stopped.
        self.threshold = threshold
        # NOTE(review): comparisons below use `is OptimizeMode.Minimize`, so
        # callers must pass an OptimizeMode member, not the raw CLI string.
        self.optimize_mode = optimize_mode
        self.predictor = EnsembleSamplingPredictor()
        if self.optimize_mode is OptimizeMode.Minimize:
            self.best_score = -self.best_score

    def assess_trial(self, trial_job_id, history):
        """Decide whether a trial should continue.

        Args:
            trial_job_id: identifier of the trial being assessed (unused).
            history: intermediate scores reported so far.

        Returns:
            AssessResult.Good to continue the trial, AssessResult.Bad to stop.
        """
        # Lazy %-args: skip formatting entirely when DEBUG logging is off.
        logger.debug('assess_trial %s', history)
        if self.optimize_mode is OptimizeMode.Minimize:
            history = [ -x for x in history ]
        max_ = max(history)
        if max_ > self.best_score:
            # New best so far: keep the trial and raise the bar.
            self.best_score = max_
            return AssessResult.Good
        # Fit the learning-curve ensemble to (step, score) pairs and estimate
        # the chance this trial stays below the best score at `period`.
        self.predictor.fit(list(range(len(history))), history)
        proba_worse = self.predictor.predict_proba_less_than(self.period, self.best_score)
        if proba_worse > self.threshold:
            return AssessResult.Bad
        return AssessResult.Good
\ No newline at end of file
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
import random
from enum import Enum
from .darkopt_assessor import DarkoptAssessor
from nni.assessor import AssessResult
# Logger for this manual test script; the START line marks script startup.
logger = logging.getLogger('nni.contrib.darkopt_assessor')
logger.debug('START')
# Optimization direction accepted on the command line. NOTE: this duplicates
# the enum in darkopt_assessor; values (the CLI strings) are identical.
OptimizeMode = Enum('OptimizeMode', [('Maximize', 'maximize'), ('Minimize', 'minimize')])
def test():
    """Drive DarkoptAssessor over a few synthetic flat learning curves."""
    # run accessor for mnist:
    # python -m nni.contribution.darkopt_assessor --best_score=0.90 --period=200 --threshold=0.9 --optimize_mode=maximize
    parser = argparse.ArgumentParser(
        description='parse command line parameters.')
    parser.add_argument('--best_score', type=float,
                        help='Expected best score for Assessor.')
    parser.add_argument('--period', type=int,
                        help='Expected period for Assessor.')
    parser.add_argument('--threshold', type=float,
                        help='Threshold for Assessor.')
    parser.add_argument('--optimize_mode', type=str, default='maximize',
                        help='Select optimize mode for Assessor: minimize or maximize.')
    FLAGS, unparsed = parser.parse_known_args()
    if FLAGS.optimize_mode not in [mode.value for mode in OptimizeMode]:
        # Fixed typo in the error message: "optimzie" -> "optimize".
        raise AttributeError('Unsupported optimize mode "%s"' % FLAGS.optimize_mode)
    # Four flat learning curves at increasing score levels.
    lcs = [[0.1] * 10,
           [0.2] * 10,
           [0.3] * 10,
           [0.4] * 10]
    # NOTE(review): DarkoptAssessor compares optimize_mode with `is` against
    # its own enum members, so passing the raw CLI string means "minimize" is
    # never detected -- confirm whether an enum conversion is intended.
    assessor = DarkoptAssessor(FLAGS.best_score, FLAGS.period, FLAGS.threshold, FLAGS.optimize_mode)
    for i in range(4):
        for k in range(10):
            # Feed a growing prefix of curve i, as NNI would report it.
            ret = assessor.assess_trial(i, lcs[i][:k + 1])
            # Fixed bug: print('result: %d', ret) passed the format string as
            # a separate argument, printing the literal "%d"; format properly.
            print('result: %s' % ret)
# Entry point: record any failure with its traceback, then propagate it so
# the process exits non-zero.
try:
    test()
except Exception as err:
    logger.exception(err)
    raise
\ No newline at end of file
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
__main__.py contains a main function to call Evolution.
'''
import argparse
import logging
from .evolution_tuner import EvolutionTuner, OptimizeMode
# Module-level logger; the START line marks dispatcher startup in the log.
logger = logging.getLogger('nni.examples.evolution_tuner')
logger.debug('START')
def main():
    """Parse command-line options and start the evolution tuner."""
    parser = argparse.ArgumentParser(description='parse command line parameters.')
    parser.add_argument('--optimize_mode', type=str, default='maximize',
                        help='Select optimize mode for Tuner: minimize or maximize.')
    flags, _ = parser.parse_known_args()
    # Reject anything that is not a declared OptimizeMode value.
    valid_modes = [mode.value for mode in OptimizeMode]
    if flags.optimize_mode not in valid_modes:
        raise AttributeError('Unsupported optimize mode "%s"' % flags.optimize_mode)
    # Hand control to the tuner's message loop.
    EvolutionTuner(flags.optimize_mode).run()
# Script entry point: log any fatal error with its traceback, then re-raise
# so the dispatcher process exits with a failure status.
try:
    main()
except Exception as exception:
    logger.exception(exception)
    raise
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge,
# to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
__main__.py
'''
import argparse
import logging
from .hyperopt_tuner import HyperoptTuner, OptimizeMode
# Module-level logger; the START line marks dispatcher startup in the log.
logger = logging.getLogger('nni.examples.hyperopt_tuner')
logger.debug('START')
def main():
    """Parse command-line options and launch the Hyperopt-based tuner."""
    parser = argparse.ArgumentParser(description='parse command line parameters.')
    parser.add_argument('--optimize_mode', type=str, default='maximize',
                        help='Select optimize mode for Tuner: minimize or maximize.')
    parser.add_argument('--algorithm_name', type=str, default='tpe',
                        help='Select algorithm for Tuner: tpe, random_search or anneal.')
    flags, _ = parser.parse_known_args()
    # Reject anything that is not a declared OptimizeMode value.
    if flags.optimize_mode not in {mode.value for mode in OptimizeMode}:
        raise AttributeError('Unsupported optimize mode "%s"' % flags.optimize_mode)
    # Hand control to the tuner's message loop.
    tuner = HyperoptTuner(flags.algorithm_name, flags.optimize_mode)
    tuner.run()
# Script entry point: log any fatal error with its traceback, then re-raise
# so the dispatcher process exits with a failure status.
try:
    main()
except Exception as exception:
    logger.exception(exception)
    raise
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
from .medianstop_assessor import MedianstopAssessor
# Module-level logger; the START line marks dispatcher startup in the log.
logger = logging.getLogger('nni.contrib.medianstop_assessor')
logger.debug('START')
def main():
    """Parse command-line options and run the median-stop assessor."""
    parser = argparse.ArgumentParser(description='parse command line parameters.')
    parser.add_argument('--start_from', type=int, default=0, dest='start_step',
                        help='Assessing each trial from the step start_step.')
    parser.add_argument('--optimize_mode', type=str, default='maximize',
                        help='Select optimize mode for Tuner: minimize or maximize.')
    flags, _ = parser.parse_known_args()
    # NOTE(review): unlike the other runners, optimize_mode is not validated
    # here -- presumably MedianstopAssessor checks the raw string itself.
    MedianstopAssessor(flags.start_step, flags.optimize_mode).run()
# Entry point: record any failure with its traceback, then propagate it so
# the process exits non-zero.
try:
    main()
except Exception as err:
    logger.exception(err)
    raise
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment