Unverified Commit 0c17e2d9 authored by chicm-ms, committed by GitHub

Sdk update (#272)

* Rename get_parameters to get_next_parameter

* annotations add get_next_parameter

* updates

* updates

* updates

* updates

* updates
parent a1014619
......@@ -4,23 +4,26 @@ For good user experience and reduce user effort, we need to design a good annota
If users use the NNI system, they only need to:
1. Annotate a variable in code as:
1. Use nni.get_next_parameter() to retrieve hyper-parameters from the Tuner. Before using any other annotation, place the following annotation at the beginning of the trial code:
'''@nni.get_next_parameter()'''
2. Annotate a variable in code as:
'''@nni.variable(nni.choice(2,3,5,7),name=self.conv_size)'''
2. Annotate an intermediate result in code as:
3. Annotate an intermediate result in code as:
'''@nni.report_intermediate_result(test_acc)'''
3. Annotate the output in code as:
4. Annotate the output in code as:
'''@nni.report_final_result(test_acc)'''
4. Annotate `function_choice` in code as:
5. Annotate `function_choice` in code as:
'''@nni.function_choice(max_pool(h_conv1, self.pool_size),avg_pool(h_conv1, self.pool_size),name=max_pool)'''
In this way, they can easily realize automatic tuning on NNI.
In this way, they can easily implement automatic tuning on NNI.
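To make this concrete, here is a minimal sketch of an annotated trial (illustrative only: `train_and_eval` is a hypothetical helper, and since the annotations live in string literals, the code still runs unchanged without NNI):
```python
'''@nni.get_next_parameter()'''

'''@nni.variable(nni.choice(2, 3, 5, 7), name=conv_size)'''
conv_size = 5

test_acc = 0.0
for epoch in range(10):
    test_acc = train_and_eval(conv_size)  # hypothetical train-and-evaluate helper
    '''@nni.report_intermediate_result(test_acc)'''

'''@nni.report_final_result(test_acc)'''
```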
For `@nni.variable`, `nni.choice` is the type of the search space, and there are 10 types for expressing your search space, as follows:
......
......@@ -27,7 +27,7 @@ Refer to [SearchSpaceSpec.md](SearchSpaceSpec.md) to learn more about search spa
2.2 Get predefined parameters
Use the following code snippet:
RECEIVED_PARAMS = nni.get_parameters()
RECEIVED_PARAMS = nni.get_next_parameter()
to get the hyper-parameter values assigned by the tuner. `RECEIVED_PARAMS` is an object, for example:
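For instance, mirroring the Tuner example later in this document, the received object could be a plain dict (an illustrative value):
```python
RECEIVED_PARAMS = {"dropout": 0.3, "learning_rate": 0.4}
```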
......
......@@ -61,7 +61,7 @@ If the you implement the ```generate_parameters``` like this:
    # your code implements here.
    return {"dropout": 0.3, "learning_rate": 0.4}
```
It's means your Tuner will always generate parameters ```{"dropout": 0.3, "learning_rate": 0.4}```. Then Trial will receive ```{"dropout": 0.3, "learning_rate": 0.4}``` this object will using ```nni.get_parameters()``` API from NNI SDK. After training of Trial, it will send result to Tuner by calling ```nni.report_final_result(0.93)```. Then ```receive_trial_result``` will function will receied these parameters like:
It means your Tuner will always generate parameters ```{"dropout": 0.3, "learning_rate": 0.4}```. Then the Trial will receive ```{"dropout": 0.3, "learning_rate": 0.4}``` by calling the API ```nni.get_next_parameter()```. Once the trial ends with a result (normally some kind of metric), it sends the result to the Tuner by calling the API ```nni.report_final_result()```, for example ```nni.report_final_result(0.93)```. Then your Tuner's ```receive_trial_result``` function will receive the result like:
```
parameter_id = 82347
parameters = {"dropout": 0.3, "learning_rate": 0.4}
......
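A minimal custom Tuner realizing this exchange might look like the following sketch (the method names follow the ```generate_parameters```/```receive_trial_result``` interface described above; treat it as illustrative, not a drop-in implementation):
```python
from nni.tuner import Tuner

class ConstantTuner(Tuner):
    '''Always proposes the same parameters, as in the example above.'''

    def generate_parameters(self, parameter_id):
        # Delivered to the Trial, which reads it via nni.get_next_parameter()
        return {"dropout": 0.3, "learning_rate": 0.4}

    def receive_trial_result(self, parameter_id, parameters, value):
        # `value` is whatever the Trial passed to nni.report_final_result()
        print(parameter_id, parameters, value)

    def update_search_space(self, search_space):
        pass
```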
# How to write a Trial running on NNI?
*A Trial receives the hyper-parameter/architecture configuration from the Tuner, and sends intermediate results to the Assessor and the final result to the Tuner.*
So to write a Trial running on NNI, users should:
**1) Have an original Trial that can run**
A Trial's code can be any machine learning code that runs locally. Here we use ```mnist-keras.py``` as an example:
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

K.set_image_data_format('channels_last')

H, W = 28, 28
NUM_CLASSES = 10

def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(100, activation='relu'),
        Dense(num_classes, activation='softmax')
    ]
    model = Sequential(layers)
    if hyper_params['optimizer'] == 'Adam':
        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
    else:
        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
    return model

def load_mnist_data(args):
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
    return x_train, y_train, x_test, y_test

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        pass

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])
    _, acc = model.evaluate(x_test, y_test, verbose=0)

def generate_default_params():
    return {
        'optimizer': 'Adam',
        'learning_rate': 0.001
    }

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()
    PARAMS = generate_default_params()
    train(ARGS, PARAMS)
```
**2)Get configure from Tuner**
Import ```nni``` and use ```nni.get_parameters()``` to receive the configuration. Note lines **10**, **24** and **25** in the following code.
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
import nni

...

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()

    PARAMS = generate_default_params()
    RECEIVED_PARAMS = nni.get_parameters()
    PARAMS.update(RECEIVED_PARAMS)
    train(ARGS, PARAMS)
```
**3) Send intermediate result**
Use ```nni.report_intermediate_result``` to send intermediate results to the Assessor. Note line **5** in the following code.
```python
...

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        nni.report_intermediate_result(logs)

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])
    _, acc = model.evaluate(x_test, y_test, verbose=0)
...
```
**4) Send final result**
Use ```nni.report_final_result``` to send the final result to the Tuner. Note line **15** in the following code.
```python
...

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        nni.report_intermediate_result(logs)

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    nni.report_final_result(acc)
...
```
Here is the complete example:
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

import nni

LOG = logging.getLogger('mnist_keras')
K.set_image_data_format('channels_last')

H, W = 28, 28
NUM_CLASSES = 10

def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
    '''
    Create simple convolutional model
    '''
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(100, activation='relu'),
        Dense(num_classes, activation='softmax')
    ]
    model = Sequential(layers)

    if hyper_params['optimizer'] == 'Adam':
        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
    else:
        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])

    return model

def load_mnist_data(args):
    '''
    Load MNIST dataset
    '''
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

    LOG.debug('x_train shape: %s', (x_train.shape,))
    LOG.debug('x_test shape: %s', (x_test.shape,))
    return x_train, y_train, x_test, y_test

class SendMetrics(keras.callbacks.Callback):
    '''
    Keras callback to send metrics to NNI framework
    '''
    def on_epoch_end(self, epoch, logs={}):
        '''
        Run on end of each epoch
        '''
        LOG.debug(logs)
        nni.report_intermediate_result(logs)

def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %d', acc)
    nni.report_final_result(acc)

def generate_default_params():
    '''
    Generate default hyper parameters
    '''
    return {
        'optimizer': 'Adam',
        'learning_rate': 0.001
    }

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()

    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_parameters()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = generate_default_params()
        PARAMS.update(RECEIVED_PARAMS)
        # train
        train(ARGS, PARAMS)
    except Exception as e:
        LOG.exception(e)
        raise
```
# How to write a Trial running on NNI?
*A Trial receives the hyper-parameter/architecture configuration from the Tuner, and sends intermediate results to the Assessor and the final result to the Tuner.*
So to write a Trial running on NNI, users should:
**1) Have an original Trial that can run**
A Trial's code can be any machine learning code that runs locally. Here we use ```mnist-keras.py``` as an example:
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

K.set_image_data_format('channels_last')

H, W = 28, 28
NUM_CLASSES = 10

def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(100, activation='relu'),
        Dense(num_classes, activation='softmax')
    ]
    model = Sequential(layers)
    if hyper_params['optimizer'] == 'Adam':
        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
    else:
        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])
    return model

def load_mnist_data(args):
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]
    return x_train, y_train, x_test, y_test

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        pass

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])
    _, acc = model.evaluate(x_test, y_test, verbose=0)

def generate_default_params():
    return {
        'optimizer': 'Adam',
        'learning_rate': 0.001
    }

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()
    PARAMS = generate_default_params()
    train(ARGS, PARAMS)
```
**2)Get configure from Tuner**
Import ```nni``` and use ```nni.get_next_parameter()``` to receive the configuration. Note lines **10**, **24** and **25** in the following code.
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
import nni

...

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()

    PARAMS = generate_default_params()
    RECEIVED_PARAMS = nni.get_next_parameter()
    PARAMS.update(RECEIVED_PARAMS)
    train(ARGS, PARAMS)
```
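The Tuner draws these values from the experiment's search space. A sketch of a matching search space for this trial, written here as a Python dict (it normally lives in a JSON file; see SearchSpaceSpec.md for the exact schema, and note the value lists are illustrative):
```python
# Assumed search space matching the tunable keys above; values are illustrative.
search_space = {
    "optimizer": {"_type": "choice", "_value": ["Adam", "SGD"]},
    "learning_rate": {"_type": "choice", "_value": [0.1, 0.01, 0.001, 0.0001]},
}
```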
**3) Send intermediate result**
Use ```nni.report_intermediate_result``` to send intermediate results to the Assessor. Note line **5** in the following code.
```python
...

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        nni.report_intermediate_result(logs)

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)
    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])
    _, acc = model.evaluate(x_test, y_test, verbose=0)
...
```
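```nni.report_intermediate_result``` accepts any serializable object, so the example sends the whole Keras ```logs``` dict each epoch. Reporting a single scalar also works; a sketch (the ```val_acc``` key is assumed from Keras naming conventions):
```python
class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # Send only the validation accuracy instead of the whole logs dict
        nni.report_intermediate_result(logs.get('val_acc', 0.0))
```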
**4) Send final result**
Use ```nni.report_final_result``` to send the final result to the Tuner. Note line **15** in the following code.
```python
...

class SendMetrics(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        nni.report_intermediate_result(logs)

def train(args, params):
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    nni.report_final_result(acc)
...
```
Here is the complete example:
```python
import argparse
import logging

import keras
import numpy as np
from keras import backend as K
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential

import nni

LOG = logging.getLogger('mnist_keras')
K.set_image_data_format('channels_last')

H, W = 28, 28
NUM_CLASSES = 10

def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES):
    '''
    Create simple convolutional model
    '''
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(100, activation='relu'),
        Dense(num_classes, activation='softmax')
    ]
    model = Sequential(layers)

    if hyper_params['optimizer'] == 'Adam':
        optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate'])
    else:
        optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9)
    model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy'])

    return model

def load_mnist_data(args):
    '''
    Load MNIST dataset
    '''
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (np.expand_dims(x_train, -1).astype(np.float) / 255.)[:args.num_train]
    x_test = (np.expand_dims(x_test, -1).astype(np.float) / 255.)[:args.num_test]
    y_train = keras.utils.to_categorical(y_train, NUM_CLASSES)[:args.num_train]
    y_test = keras.utils.to_categorical(y_test, NUM_CLASSES)[:args.num_test]

    LOG.debug('x_train shape: %s', (x_train.shape,))
    LOG.debug('x_test shape: %s', (x_test.shape,))
    return x_train, y_train, x_test, y_test

class SendMetrics(keras.callbacks.Callback):
    '''
    Keras callback to send metrics to NNI framework
    '''
    def on_epoch_end(self, epoch, logs={}):
        '''
        Run on end of each epoch
        '''
        LOG.debug(logs)
        nni.report_intermediate_result(logs)

def train(args, params):
    '''
    Train model
    '''
    x_train, y_train, x_test, y_test = load_mnist_data(args)
    model = create_mnist_model(params)

    model.fit(x_train, y_train, batch_size=args.batch_size, epochs=args.epochs, verbose=1,
              validation_data=(x_test, y_test), callbacks=[SendMetrics()])

    _, acc = model.evaluate(x_test, y_test, verbose=0)
    LOG.debug('Final result is: %d', acc)
    nni.report_final_result(acc)

def generate_default_params():
    '''
    Generate default hyper parameters
    '''
    return {
        'optimizer': 'Adam',
        'learning_rate': 0.001
    }

if __name__ == '__main__':
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--batch_size", type=int, default=200, help="batch size", required=False)
    PARSER.add_argument("--epochs", type=int, default=10, help="Train epochs", required=False)
    PARSER.add_argument("--num_train", type=int, default=1000, help="Number of train samples to be used, maximum 60000", required=False)
    PARSER.add_argument("--num_test", type=int, default=1000, help="Number of test samples to be used, maximum 10000", required=False)

    ARGS, UNKNOWN = PARSER.parse_known_args()

    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = generate_default_params()
        PARAMS.update(RECEIVED_PARAMS)
        # train
        train(ARGS, PARAMS)
    except Exception as e:
        LOG.exception(e)
        raise
```
\ No newline at end of file
......@@ -97,7 +97,7 @@ if __name__ == '__main__':
    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_parameters()
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = get_default_parameters()
        PARAMS.update(RECEIVED_PARAMS)
......
......@@ -436,7 +436,7 @@ if __name__ == '__main__':
    qp_pairs, dev_qp_pairs = load_data()
    logger.debug('Init finish.')

    original_params = nni.get_parameters()
    original_params = nni.get_next_parameter()
    '''
    with open('data.json') as f:
        original_params = json.load(f)
......
......@@ -229,6 +229,7 @@ def generate_defualt_params():
if __name__ == '__main__':
    '''@nni.get_next_parameter()'''
    try:
        main(generate_defualt_params())
    except Exception as exception:
......
......@@ -122,7 +122,7 @@ if __name__ == '__main__':
    try:
        # get parameters from tuner
        # RECEIVED_PARAMS = {"optimizer": "Adam", "learning_rate": 0.00001}
        RECEIVED_PARAMS = nni.get_parameters()
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = generate_default_params()
        PARAMS.update(RECEIVED_PARAMS)
......
......@@ -149,7 +149,7 @@ def parse_init_json(data):
if __name__ == '__main__':
    try:
        # get parameters from tuner
        data = nni.get_parameters()
        data = nni.get_next_parameter()
        logger.debug(data)
        RCV_PARAMS = parse_init_json(data)
......
......@@ -120,7 +120,7 @@ if __name__ == '__main__':
    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_parameters()
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = generate_default_params()
        PARAMS.update(RECEIVED_PARAMS)
......
......@@ -219,7 +219,7 @@ def generate_default_params():
if __name__ == '__main__':
    try:
        # get parameters from tuner
        RCV_PARAMS = nni.get_parameters()
        RCV_PARAMS = nni.get_next_parameter()
        logger.debug(RCV_PARAMS)
        # run
        params = generate_default_params()
......
......@@ -175,7 +175,7 @@ def test(epoch):
if __name__ == '__main__':
    try:
        RCV_CONFIG = nni.get_parameters()
        RCV_CONFIG = nni.get_next_parameter()
        #RCV_CONFIG = {'lr': 0.1, 'optimizer': 'Adam', 'model':'senet18'}
        _logger.debug(RCV_CONFIG)
......
......@@ -71,7 +71,7 @@ if __name__ == '__main__':
    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_parameters()
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = get_default_parameters()
        PARAMS.update(RECEIVED_PARAMS)
......
......@@ -90,7 +90,7 @@ if __name__ == '__main__':
    try:
        # get parameters from tuner
        RECEIVED_PARAMS = nni.get_parameters()
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = get_default_parameters()
        PARAMS.update(RECEIVED_PARAMS)
......
......@@ -49,13 +49,18 @@ def request_next_parameter():
    })
    send_metric(metric)

def get_parameters():
def get_next_parameter():
    global _param_index
    params_file_name = ''
    if _multiphase and (_multiphase == 'true' or _multiphase == 'True'):
        params_file_name = ('parameter_{}.cfg'.format(_param_index), 'parameter.cfg')[_param_index == 0]
    else:
        params_file_name = 'parameter.cfg'
        if _param_index > 0:
            return None
        elif _param_index == 0:
            params_file_name = 'parameter.cfg'
        else:
            raise AssertionError('_param_index value ({}) should >=0'.format(_param_index))

    params_filepath = os.path.join(_sysdir, params_file_name)
    if not os.path.isfile(params_filepath):
......
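The tuple-indexing expression in the multiphase branch above is a compact conditional: indexing a two-element tuple with a bool selects element 0 for False and element 1 for True. A small sketch of how the file name resolves:
```python
# (on_false, on_true)[condition] -- the bool indexes the tuple as 0 or 1
for i in (0, 1, 2):
    name = ('parameter_{}.cfg'.format(i), 'parameter.cfg')[i == 0]
    print(i, name)  # 0 -> parameter.cfg, 1 -> parameter_1.cfg, 2 -> parameter_2.cfg
```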
......@@ -22,7 +22,7 @@
import json_tricks
def get_parameters():
def get_next_parameter():
    pass
def get_sequence_id():
......
......@@ -29,7 +29,7 @@ _params = None
_last_metric = None
def get_parameters():
def get_next_parameter():
    return _params
def send_metric(string):
......
......@@ -126,4 +126,4 @@ else:
    if name is None:
        name = '__line{:d}'.format(lineno)
    key = '{}/{}/{}'.format(module, name, func)
    return trial.get_parameter(key)
    return trial.get_current_parameter(key)
......@@ -26,7 +26,8 @@ from . import platform
__all__ = [
    'get_parameters',
    'get_next_parameter',
    'get_current_parameter',
    'report_intermediate_result',
    'report_final_result',
    'get_sequence_id'
......@@ -37,15 +38,18 @@ _params = None
_sequence_id = platform.get_sequence_id()
def get_parameters():
def get_next_parameter():
    """Returns a set of (hyper-)parameters generated by Tuner."""
    global _params
    _params = platform.get_parameters()
    _params = platform.get_next_parameter()
    if _params is None:
        return None
    return _params['parameters']

def get_parameter(tag):
    return get_parameters()[tag]
def get_current_parameter(tag):
    if _params is None:
        return None
    return _params['parameters'][tag]

def get_sequence_id():
    return _sequence_id
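With this split, a trial fetches the full parameter set once via ```get_next_parameter()```, and ```get_current_parameter(tag)``` then reads a single value from the cached set. A sketch, assuming the keys from the earlier examples:
```python
import nni

params = nni.get_next_parameter()                # fetches and caches the full set
lr = nni.get_current_parameter('learning_rate')  # reads one cached value by tag
```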
......@@ -57,7 +61,7 @@ def report_intermediate_result(metric):
    metric: serializable object.
    """
    global _intermediate_seq
    assert _params is not None, 'nni.get_parameters() needs to be called before report_intermediate_result'
    assert _params is not None, 'nni.get_next_parameter() needs to be called before report_intermediate_result'
    metric = json_tricks.dumps({
        'parameter_id': _params['parameter_id'],
        'trial_job_id': env_args.trial_job_id,
......@@ -73,7 +77,7 @@ def report_final_result(metric):
"""Reports final result to tuner.
metric: serializable object.
"""
assert _params is not None, 'nni.get_parameters() needs to be called before report_final_result'
assert _params is not None, 'nni.get_next_parameter() needs to be called before report_final_result'
metric = json_tricks.dumps({
'parameter_id': _params['parameter_id'],
'trial_job_id': env_args.trial_job_id,
......
......@@ -32,8 +32,8 @@ class TrialTestCase(TestCase):
        self._trial_params = { 'msg': 'hi', 'x': 123, 'dict': { 'key': 'value', 'y': None } }
        nni.trial._params = { 'parameter_id': 'test_param', 'parameters': self._trial_params }

    def test_get_parameters(self):
        self.assertEqual(nni.get_parameters(), self._trial_params)
    def test_get_next_parameter(self):
        self.assertEqual(nni.get_next_parameter(), self._trial_params)

    def test_report_intermediate_result(self):
        nni.report_intermediate_result(123)
......