"examples/git@developer.sourcefind.cn:hehl2/torchaudio.git" did not exist on "7b6b2d000023e2aa3365b769866c5f375e0d5fda"
Commit 3e0475a1 authored by Pete Warden

Added mobilenet support

parent e648b94a
export_inference_graph.py:

@@ -63,7 +63,6 @@ from tensorflow.python.platform import gfile
 from datasets import dataset_factory
 from nets import nets_factory
 slim = tf.contrib.slim
 tf.app.flags.DEFINE_string(
@@ -74,8 +73,8 @@ tf.app.flags.DEFINE_boolean(
     'Whether to save out a training-focused version of the model.')
 tf.app.flags.DEFINE_integer(
-    'default_image_size', 224,
-    'The image size to use if the model does not define it.')
+    'image_size', None,
+    'The image size to use, otherwise use the model default_image_size.')
 tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
                            'The name of the dataset to use with the model.')
@@ -106,10 +105,7 @@ def main(_):
         FLAGS.model_name,
         num_classes=(dataset.num_classes - FLAGS.labels_offset),
         is_training=FLAGS.is_training)
-    if hasattr(network_fn, 'default_image_size'):
-      image_size = network_fn.default_image_size
-    else:
-      image_size = FLAGS.default_image_size
+    image_size = FLAGS.image_size or network_fn.default_image_size
     placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                  shape=[1, image_size, image_size, 3])
     network_fn(placeholder)
...
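The net effect of this hunk: the hasattr branch is replaced by a one-line fallback, so the exporter uses --image_size when it is set and otherwise the model's default_image_size attribute. A minimal standalone sketch of that fallback (FakeFlags and fake_network_fn are hypothetical stand-ins for the real flag object and the function returned by nets_factory):

# Minimal sketch of the image-size fallback above; runs without TensorFlow.
class FakeFlags(object):
  image_size = None  # corresponds to leaving --image_size unset

FLAGS = FakeFlags()

def fake_network_fn(images):
  return images

fake_network_fn.default_image_size = 224  # attribute set on the function, slim-style

# With the flag unset (None), `or` falls through to the model default.
image_size = FLAGS.image_size or fake_network_fn.default_image_size
print(image_size)  # 224

# With the flag set, the flag wins.
FLAGS.image_size = 160
print(FLAGS.image_size or fake_network_fn.default_image_size)  # 160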
nets/mobilenet_v1.py:

@@ -27,6 +27,8 @@ As described in https://arxiv.org/abs/1704.04861.
 100% Mobilenet V1 (base) with input size 224x224:
+
+See mobilenet_v1()
 Layer                                                     params           macs
 --------------------------------------------------------------------------------
 MobilenetV1/Conv2d_0/Conv2D:                                864      10,838,016
@@ -62,6 +64,8 @@ Total: 3,185,088 567,716,352
 75% Mobilenet V1 (base) with input size 128x128:
+
+See mobilenet_v1_075()
 Layer                                                     params           macs
 --------------------------------------------------------------------------------
 MobilenetV1/Conv2d_0/Conv2D:                                648       2,654,208
@@ -102,6 +106,7 @@ from __future__ import division
 from __future__ import print_function
 from collections import namedtuple
+import functools
 import tensorflow as tf
@@ -335,6 +340,17 @@ def mobilenet_v1(inputs,
 mobilenet_v1.default_image_size = 224
+
+
+def wrapped_partial(func, *args, **kwargs):
+  partial_func = functools.partial(func, *args, **kwargs)
+  functools.update_wrapper(partial_func, func)
+  return partial_func
+
+mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
+mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
+mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
+
 def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
   """Define kernel size which is automatically reduced for small input.
...
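A note on why functools.update_wrapper is applied to the partials rather than using a bare functools.partial: update_wrapper copies the wrapped function's __name__, __doc__ and __dict__, so attributes such as default_image_size survive on mobilenet_v1_075 and friends, which is what the exporter's image-size fallback relies on. A small standalone sketch (no TensorFlow required; the dummy mobilenet_v1 below is a stand-in for the real network function):

import functools

def wrapped_partial(func, *args, **kwargs):
  partial_func = functools.partial(func, *args, **kwargs)
  functools.update_wrapper(partial_func, func)  # copies __name__, __doc__, __dict__
  return partial_func

# Dummy stand-in for the real mobilenet_v1 network function.
def mobilenet_v1(inputs, depth_multiplier=1.0):
  return 'built with depth_multiplier=%s' % depth_multiplier

mobilenet_v1.default_image_size = 224

mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)

print(mobilenet_v1_075.__name__)            # mobilenet_v1
print(mobilenet_v1_075.default_image_size)  # 224, carried over via __dict__
print(mobilenet_v1_075(None))               # built with depth_multiplier=0.75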
nets/nets_factory.py:

@@ -54,6 +54,9 @@ networks_map = {'alexnet_v2': alexnet.alexnet_v2,
                 'resnet_v2_152': resnet_v2.resnet_v2_152,
                 'resnet_v2_200': resnet_v2.resnet_v2_200,
                 'mobilenet_v1': mobilenet_v1.mobilenet_v1,
+                'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
+                'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
+                'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
                }
@@ -78,6 +81,9 @@ arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
                   'resnet_v2_152': resnet_v2.resnet_arg_scope,
                   'resnet_v2_200': resnet_v2.resnet_arg_scope,
                   'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
+                  'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
+                  'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
+                  'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
                  }
...
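With these map entries in place, the smaller variants can be requested by name, which is how the --model_name values used by the new script (mobilenet_v1_075 and so on) resolve. A hedged usage sketch, assuming TF 1.x and the slim nets_factory.get_network_fn(name, num_classes, is_training=...) interface:

# Sketch only: assumes TF 1.x with the slim model library on the Python path.
import tensorflow as tf
from nets import nets_factory

# 1001 classes = ImageNet plus the slim background class.
network_fn = nets_factory.get_network_fn(
    'mobilenet_v1_075', num_classes=1001, is_training=False)

# The wrapped partial still exposes the model's default input size.
image_size = network_fn.default_image_size  # 224
images = tf.placeholder(tf.float32, [1, image_size, image_size, 3])
logits, end_points = network_fn(images)
print(logits.shape)  # (1, 1001)

The new shell script below ties these pieces together: it exports, freezes and quantizes a chosen MobileNet variant.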
#!/bin/bash
# This script prepares the various different versions of MobileNet models for
# use in a mobile application. If you don't specify your own trained checkpoint
# file, it will download pretrained checkpoints for ImageNet. You'll also need
# a copy of the TensorFlow source code to run some of the commands; by default
# it's looked for in ../tensorflow, but you can set the TENSORFLOW_PATH
# environment variable before calling the script if your source is in a
# different location.
# The main slim/nets/mobilenet_v1.md description has more details about the
# model. The main points are that it comes in four width versions (1.0, 0.75,
# 0.50, and 0.25), which control the number of parameters and so the file size
# of the model, and supports input image sizes of 224, 192, 160, or 128 pixels,
# which affect the amount of computation needed and the latency.
# Here's an example generating a frozen model from pretrained weights:
#
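#   (illustrative; the script filename and working directory are assumptions,
#   run it from the directory that contains the slim package so that the
#   "bazel run slim:..." targets below resolve)
#
#   export TENSORFLOW_PATH=/my/path/to/tensorflow
#   ./export_mobilenet.sh 1.0 224
#
# A custom checkpoint can be passed as the optional third argument:
#
#   ./export_mobilenet.sh 0.50 160 /path/to/my_mobilenet.ckpt
#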
set -e
print_usage () {
echo "Creates a frozen mobilenet model suitable for mobile use"
echo "Usage:"
echo "$0 <mobilenet version> <input size> [checkpoint path]"
}
MOBILENET_VERSION=$1
IMAGE_SIZE=$2
CHECKPOINT=$3
if [[ ${MOBILENET_VERSION} = "1.0" ]]; then
SLIM_NAME=mobilenet_v1
elif [[ ${MOBILENET_VERSION} = "0.75" ]]; then
SLIM_NAME=mobilenet_v1_075
elif [[ ${MOBILENET_VERSION} = "0.50" ]]; then
SLIM_NAME=mobilenet_v1_050
elif [[ ${MOBILENET_VERSION} = "0.25" ]]; then
SLIM_NAME=mobilenet_v1_025
else
echo "Bad mobilenet version, should be one of 1.0, 0.75, 0.50, or 0.25"
print_usage
exit 1
fi
if [[ "${IMAGE_SIZE}" != "224" ]] && [[ "${IMAGE_SIZE}" != "192" ]] && [[ "${IMAGE_SIZE}" != "160" ]] && [[ "${IMAGE_SIZE}" != "128" ]]; then
echo "Bad input image size, should be one of 224, 192, 160, or 128"
print_usage
exit 1
fi
if [[ -z "${TENSORFLOW_PATH}" ]]; then
TENSORFLOW_PATH=../tensorflow
fi
if [[ ! -d ${TENSORFLOW_PATH} ]]; then
echo "TensorFlow source folder not found. You should download the source and then set"
echo "the TENSORFLOW_PATH environment variable to point to it, like this:"
echo "export TENSORFLOW_PATH=/my/path/to/tensorflow"
print_usage
exit 1
fi
MODEL_FOLDER=/tmp/mobilenet_v1_${MOBILENET_VERSION}_${IMAGE_SIZE}
if [[ -d ${MODEL_FOLDER} ]]; then
echo "Model folder ${MODEL_FOLDER} already exists!"
echo "If you want to overwrite it, then 'rm -rf ${MODEL_FOLDER}' first."
print_usage
exit 1
fi
mkdir ${MODEL_FOLDER}
if [[ -z "${CHECKPOINT}" ]]; then
echo "*******"
echo "Downloading pretrained weights"
echo "*******"
curl "http://download.tensorflow.org/models/mobilenet_v1_${MOBILENET_VERSION}_${IMAGE_SIZE}_2017_06_14.tar.gz" \
-o ${MODEL_FOLDER}/checkpoints.tar.gz
tar xzf ${MODEL_FOLDER}/checkpoints.tar.gz --directory ${MODEL_FOLDER}
CHECKPOINT=${MODEL_FOLDER}/mobilenet_v1_${MOBILENET_VERSION}_${IMAGE_SIZE}.ckpt
fi
echo "*******"
echo "Exporting graph architecture to ${MODEL_FOLDER}/unfrozen_graph.pb"
echo "*******"
bazel run slim:export_inference_graph -- \
--model_name=${SLIM_NAME} --image_size=${IMAGE_SIZE} --logtostderr \
--output_file=${MODEL_FOLDER}/unfrozen_graph.pb --dataset_dir=${MODEL_FOLDER}
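# The exported unfrozen_graph.pb contains only the graph architecture; the
# trained weights stay in the checkpoint until the freeze_graph step below
# bakes them into the graph as constants.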
cd ${TENSORFLOW_PATH}
echo "*******"
echo "Freezing graph to ${MODEL_FOLDER}/frozen_graph.pb"
echo "*******"
bazel run tensorflow/python/tools:freeze_graph -- \
--input_graph=${MODEL_FOLDER}/unfrozen_graph.pb \
--input_checkpoint=${CHECKPOINT} \
--input_binary=true --output_graph=${MODEL_FOLDER}/frozen_graph.pb \
--output_node_names=MobilenetV1/Predictions/Reshape_1
echo "Quantizing weights to ${MODEL_FOLDER}/quantized_graph.pb"
bazel run tensorflow/tools/graph_transforms:transform_graph -- \
--in_graph=${MODEL_FOLDER}/frozen_graph.pb \
--out_graph=${MODEL_FOLDER}/quantized_graph.pb \
--inputs=input --outputs=MobilenetV1/Predictions/Reshape_1 \
--transforms='fold_constants fold_batch_norms quantize_weights'
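# fold_constants pre-computes constant subgraphs, fold_batch_norms merges the
# batch-norm scaling into the preceding convolution weights, and
# quantize_weights stores the large weight constants as 8 bits, roughly
# quartering the file size at a small cost in accuracy.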
echo "*******"
echo "Running label_image using the graph"
echo "*******"
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--input_layer=input --output_layer=MobilenetV1/Predictions/Reshape_1 \
--graph=${MODEL_FOLDER}/quantized_graph.pb --input_mean=-127 --input_std=127 \
--image=tensorflow/examples/label_image/data/grace_hopper.jpg \
--input_width=${IMAGE_SIZE} --input_height=${IMAGE_SIZE} --labels=${MODEL_FOLDER}/labels.txt
echo "*******"
echo "Saved graphs to ${MODEL_FOLDER}/frozen_graph.pb and ${MODEL_FOLDER}/quantized_graph.pb"
echo "*******"