Commit 59a20a31 authored by Toby Boyd

Accuracy improved from 92% to 92.5% across two runs. Added L2 weight decay on everything.

parent ba39a3db
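The "decay on everything" in the commit message corresponds to the recurring pattern in the diff below: every Conv2D and Dense layer gains an 'he_normal' kernel initializer plus L2 regularization on both kernel and bias. A minimal sketch of that layer configuration, assuming a placeholder decay value (the file's actual L2_WEIGHT_DECAY constant is defined outside this excerpt):

import tensorflow as tf

# Placeholder value; the real L2_WEIGHT_DECAY constant is defined near the
# top of the file and is not visible in this diff excerpt.
L2_WEIGHT_DECAY = 2e-4

# Pattern applied throughout the commit: 'he_normal' initialization plus
# L2 weight decay on both the kernel and the bias of every parameterized layer.
conv = tf.keras.layers.Conv2D(
    16, (3, 3),
    padding='same',
    kernel_initializer='he_normal',
    kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
    bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))

dense = tf.keras.layers.Dense(
    10, activation='softmax',
    kernel_initializer='he_normal',
    kernel_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
    bias_regularizer=tf.keras.regularizers.l2(L2_WEIGHT_DECAY))

With regularizers attached this way, Keras collects the decay terms in model.losses and adds them to the training loss automatically.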
@@ -23,7 +23,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
-import os
 import warnings
 import tensorflow as tf
@@ -70,7 +69,12 @@ def _obtain_input_shape(input_shape,
   return input_shape
-def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
+def identity_building_block(input_tensor,
+                            kernel_size,
+                            filters,
+                            stage,
+                            block,
+                            training=None):
   """The identity block is the block that has no conv layer at shortcut.
   Arguments:
@@ -80,6 +84,8 @@ def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
     filters: list of integers, the filters of 3 conv layer at main path
     stage: integer, current stage label, used for generating layer names
     block: 'a','b'..., current block label, used for generating layer names
+    training: Only used if training keras model with Estimator. In other
+      scenarios it is handled automatically.
   Returns:
     Output tensor for the block.
@@ -94,6 +100,7 @@ def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
   x = tf.keras.layers.Conv2D(filters1, kernel_size,
                              padding='same',
+                             kernel_initializer='he_normal',
                              kernel_regularizer=
                              tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                              bias_regularizer=
@@ -103,11 +110,12 @@ def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
                                          name=bn_name_base + '2a',
                                          momentum=BATCH_NORM_DECAY,
                                          epsilon=BATCH_NORM_EPSILON)(
-                                             x, training=True)
+                                             x, training=training)
   x = tf.keras.layers.Activation('relu')(x)
   x = tf.keras.layers.Conv2D(filters2, kernel_size,
                              padding='same',
+                             kernel_initializer='he_normal',
                              kernel_regularizer=
                              tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                              bias_regularizer=
@@ -117,7 +125,7 @@ def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
                                          name=bn_name_base + '2b',
                                          momentum=BATCH_NORM_DECAY,
                                          epsilon=BATCH_NORM_EPSILON)(
-                                             x, training=True)
+                                             x, training=training)
   x = tf.keras.layers.add([x, input_tensor])
   x = tf.keras.layers.Activation('relu')(x)
@@ -125,12 +133,12 @@ def identity_building_block(input_tensor, kernel_size, filters, stage, block, training):
 def conv_building_block(input_tensor,
                         kernel_size,
                         filters,
                         stage,
                         block,
                         strides=(2, 2),
-                        training=True):
+                        training=None):
   """A block that has a conv layer at shortcut.
   Arguments:
@@ -141,7 +149,8 @@ def conv_building_block(input_tensor,
     stage: integer, current stage label, used for generating layer names
     block: 'a','b'..., current block label, used for generating layer names
     strides: Strides for the first conv layer in the block.
-    training: Boolean to indicate if we are in the training loop.
+    training: Only used if training keras model with Estimator. In other
+      scenarios it is handled automatically.
   Returns:
     Output tensor for the block.
@@ -158,21 +167,23 @@ def conv_building_block(input_tensor,
   conv_name_base = 'res' + str(stage) + block + '_branch'
   bn_name_base = 'bn' + str(stage) + block + '_branch'
-  x = tf.keras.layers.Conv2D(filters1, kernel_size,
+  x = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides,
                              padding='same',
+                             kernel_initializer='he_normal',
                              kernel_regularizer=
                              tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                              bias_regularizer=
                              tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
-                             name=conv_name_base + '2a', strides=strides)(input_tensor)
+                             name=conv_name_base + '2a')(input_tensor)
   x = tf.keras.layers.BatchNormalization(axis=bn_axis,
                                          name=bn_name_base + '2a',
                                          momentum=BATCH_NORM_DECAY,
                                          epsilon=BATCH_NORM_EPSILON)(
-                                             x, training=True)
+                                             x, training=training)
   x = tf.keras.layers.Activation('relu')(x)
   x = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same',
+                             kernel_initializer='he_normal',
                              kernel_regularizer=
                              tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                              bias_regularizer=
@@ -182,9 +193,10 @@ def conv_building_block(input_tensor,
                                          name=bn_name_base + '2b',
                                          momentum=BATCH_NORM_DECAY,
                                          epsilon=BATCH_NORM_EPSILON)(
-                                             x, training=True)
+                                             x, training=training)
   shortcut = tf.keras.layers.Conv2D(filters2, (1, 1), strides=strides,
+                                    kernel_initializer='he_normal',
                                     kernel_regularizer=
                                     tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                                     bias_regularizer=
@@ -193,19 +205,21 @@ def conv_building_block(input_tensor,
   shortcut = tf.keras.layers.BatchNormalization(
       axis=bn_axis, name=bn_name_base + '1',
       momentum=BATCH_NORM_DECAY, epsilon=BATCH_NORM_EPSILON)(
-          shortcut, training=True)
+          shortcut, training=training)
   x = tf.keras.layers.add([x, shortcut])
   x = tf.keras.layers.Activation('relu')(x)
   return x
-def ResNet56(input_shape=None, classes=1000):
+def ResNet56(input_shape=None, classes=100, training=None):
   """Instantiates the ResNet56 architecture.
   Arguments:
     input_shape: optional shape tuple
     classes: optional number of classes to classify images into
+    training: Only used if training keras model with Estimator. In other
+      scenarios it is handled automatically.
   Returns:
     A Keras model instance.
@@ -226,74 +240,83 @@ def ResNet56(input_shape=None, classes=1000):
   x = tf.keras.layers.Conv2D(16, (3, 3),
                              strides=(1, 1),
                              padding='valid',
+                             kernel_initializer='he_normal',
+                             kernel_regularizer=
+                             tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
+                             bias_regularizer=
+                             tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
                              name='conv1')(x)
   x = tf.keras.layers.BatchNormalization(axis=bn_axis, name='bn_conv1',
                                          momentum=BATCH_NORM_DECAY,
                                          epsilon=BATCH_NORM_EPSILON)(
-                                             x, training=True)
+                                             x, training=training)
   x = tf.keras.layers.Activation('relu')(x)
-  # x = tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(x)
   x = conv_building_block(x, 3, [16, 16], stage=2, block='a', strides=(1, 1),
-                          training=True)
+                          training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='b',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='c',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='d',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='e',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='f',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='g',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='h',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [16, 16], stage=2, block='i',
-                              training=True)
+                              training=training)
   x = conv_building_block(x, 3, [32, 32], stage=3, block='a',
-                          training=True)
+                          training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='b',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='c',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='d',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='e',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='f',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='g',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='h',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [32, 32], stage=3, block='i',
-                              training=True)
+                              training=training)
   x = conv_building_block(x, 3, [64, 64], stage=4, block='a',
-                          training=True)
+                          training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='b',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='c',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='d',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='e',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='f',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='g',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='h',
-                              training=True)
+                              training=training)
   x = identity_building_block(x, 3, [64, 64], stage=4, block='i',
-                              training=True)
+                              training=training)
-  x = tf.keras.layers.AveragePooling2D((8, 8), name='avg_pool')(x)
-  x = tf.keras.layers.Flatten()(x)
-  x = tf.keras.layers.Dense(classes, activation='softmax', name='fc10')(x)
+  x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
+  x = tf.keras.layers.Dense(classes, activation='softmax',
+                            kernel_initializer='he_normal',
+                            kernel_regularizer=
+                            tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
+                            bias_regularizer=
+                            tf.keras.regularizers.l2(L2_WEIGHT_DECAY),
+                            name='fc10')(x)
   inputs = img_input
   # Create model.
...
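The other recurring change is the training argument: it now defaults to None and is forwarded to every BatchNormalization call instead of a hard-coded training=True. With None, Keras resolves train/inference behaviour on its own during fit and evaluate; an explicit boolean is only needed on the Estimator path, as the new docstrings note. The sketch below is a self-contained miniature of that plumbing, plus the GlobalAveragePooling2D head that replaces AveragePooling2D + Flatten; tiny_head and its argument values are illustrative, not part of the file:

import tensorflow as tf

def tiny_head(inputs, classes=10, training=None):
  # Hypothetical helper mirroring the commit's pattern: the caller's
  # `training` flag is forwarded to batch norm rather than pinned to True.
  x = tf.keras.layers.Conv2D(
      16, (3, 3), padding='same', kernel_initializer='he_normal',
      kernel_regularizer=tf.keras.regularizers.l2(2e-4),
      bias_regularizer=tf.keras.regularizers.l2(2e-4))(inputs)
  x = tf.keras.layers.BatchNormalization()(x, training=training)
  x = tf.keras.layers.Activation('relu')(x)
  # GlobalAveragePooling2D stands in for the old AveragePooling2D + Flatten.
  x = tf.keras.layers.GlobalAveragePooling2D()(x)
  return tf.keras.layers.Dense(classes, activation='softmax')(x)

inputs = tf.keras.Input(shape=(32, 32, 3))
# Plain Keras: leave training=None and let fit()/evaluate() set the BN mode.
keras_model = tf.keras.Model(inputs, tiny_head(inputs))
# Estimator path: build the graph with the mode pinned explicitly.
estimator_graph = tf.keras.Model(inputs, tiny_head(inputs, training=True))

In the same spirit, ResNet56 in this file now accepts training=None and passes it through to each conv_building_block and identity_building_block.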