"""
Port TensorFlow Quickstart to NNI
=================================
This is a modified version of `TensorFlow quickstart`_.

It can be run directly and will have the exact same result as the original version.

Furthermore, it enables automatic hyperparameter tuning with an NNI *experiment*, which will be detailed later.

It is recommended to run this script directly first to verify the environment.

There are 3 key differences from the original version:

1. In the `Get optimized hyperparameters`_ part, it receives generated hyperparameters.
2. In the `(Optional) Report intermediate results`_ part, it reports per-epoch accuracy metrics.
3. In the `Report final result`_ part, it reports the final accuracy.

.. _TensorFlow quickstart: https://www.tensorflow.org/tutorials/quickstart/beginner
"""

# %%
import nni
import tensorflow as tf

# %%
# Hyperparameters to be tuned
# ---------------------------
# These are the hyperparameters that will be tuned later.
params = {
    'dense_units': 128,
    'activation_type': 'relu',
    'dropout_rate': 0.2,
    'learning_rate': 0.001,
}
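
# %%
# When this script runs under an NNI *experiment*, the values above serve as
# defaults. The search space that the tuner samples from is defined on the
# experiment side, not in this script; a sketch in NNI's search space format,
# with illustrative ranges, might look like::
#
#     search_space = {
#         'dense_units': {'_type': 'choice', '_value': [64, 128, 256]},
#         'activation_type': {'_type': 'choice', '_value': ['relu', 'tanh', 'swish']},
#         'dropout_rate': {'_type': 'uniform', '_value': [0.1, 0.5]},
#         'learning_rate': {'_type': 'loguniform', '_value': [0.0001, 0.1]},
#     }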

# %%
# Get optimized hyperparameters
# -----------------------------
# If run directly, :func:`nni.get_next_parameter` is a no-op and returns an empty dict.
# But when run with an NNI *experiment*, it will receive optimized hyperparameters from the tuning algorithm.
optimized_params = nni.get_next_parameter()
params.update(optimized_params)
print(params)
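
# %%
# For example, if a tuner samples from the search space sketched above, one
# trial might print hypothetical values such as::
#
#     {'dense_units': 64, 'activation_type': 'tanh', 'dropout_rate': 0.3, 'learning_rate': 0.003}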

# %%
# Load dataset
# ------------
mnist = tf.keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
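# Scale pixel values from the [0, 255] integer range to floats in [0, 1].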
x_train, x_test = x_train / 255.0, x_test / 255.0

# %%
# Build model with hyperparameters
# --------------------------------
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(params['dense_units'], activation=params['activation_type']),
    tf.keras.layers.Dropout(params['dropout_rate']),
    tf.keras.layers.Dense(10)
])

adam = tf.keras.optimizers.Adam(learning_rate=params['learning_rate'])
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=adam, loss=loss_fn, metrics=['accuracy'])

# %%
# (Optional) Report intermediate results
# --------------------------------------
# The callback reports per-epoch accuracy to show the learning curve in the web portal.
# You can also leverage the metrics for early stopping with :doc:`NNI assessors </hpo/assessors>`.
#
# This part can be safely skipped and the experiment will work fine.
callback = tf.keras.callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: nni.report_intermediate_result(logs['accuracy'])
)
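
# %%
# Equivalently, a full ``Callback`` subclass can report the metric; a minimal
# sketch (the class name ``ReportIntermediates`` is ours, not an NNI API)::
#
#     class ReportIntermediates(tf.keras.callbacks.Callback):
#         def on_epoch_end(self, epoch, logs=None):
#             nni.report_intermediate_result(logs['accuracy'])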

# %%
# Train and evaluate the model
# ----------------------------
model.fit(x_train, y_train, epochs=5, verbose=2, callbacks=[callback])
loss, accuracy = model.evaluate(x_test, y_test, verbose=2)

# %%
# Report final result
# -------------------
# Report final accuracy to NNI so the tuning algorithm can suggest better hyperparameters.
nni.report_final_result(accuracy)
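
# %%
# To actually tune the hyperparameters, launch this script from an NNI
# experiment. A minimal sketch using NNI's Python ``Experiment`` API (the
# tuner, trial budget, and port below are illustrative choices, and
# ``search_space`` refers to a definition like the one sketched earlier)::
#
#     from nni.experiment import Experiment
#
#     experiment = Experiment('local')
#     experiment.config.trial_command = 'python model.py'
#     experiment.config.search_space = search_space
#     experiment.config.tuner.name = 'TPE'
#     experiment.config.tuner.class_args['optimize_mode'] = 'maximize'
#     experiment.config.max_trial_number = 10
#     experiment.config.trial_concurrency = 2
#     experiment.run(8080)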