ModelZoo / ResNet50_tensorflow / Commits

Commit 1f4747a4, authored Apr 02, 2018 by pkulzc

Merge remote-tracking branch 'upstream/master'

Parents: d2d01f4f, a7aa25d3
Changes: showing 20 of 116 changed files, with 235 additions and 57 deletions (+235 −57).
official/utils/testing/scripts/presubmit.sh (+76 −0)
official/wide_deep/wide_deep.py (+23 −17)
official/wide_deep/wide_deep_test.py (+33 −0)
research/deeplab/deeplab_demo.ipynb (+10 −10)
research/deeplab/train.py (+1 −1)
research/deeplab/vis.py (+1 −1)
research/slim/BUILD (+24 −0)
research/slim/README.md (+4 −2)
research/slim/datasets/build_imagenet_data.py (+1 −2)
research/slim/datasets/preprocess_imagenet_validation_data.py (+2 −2)
research/slim/datasets/process_bounding_boxes.py (+1 −3)
research/slim/deployment/model_deploy.py (+4 −3)
research/slim/nets/cyclegan.py (+1 −2)
research/slim/nets/dcgan.py (+2 −3)
research/slim/nets/dcgan_test.py (+2 −2)
research/slim/nets/inception_v4_test.py (+1 −1)
research/slim/nets/mobilenet/README.md (+48 −7)
research/slim/nets/mobilenet/madds_top1_accuracy.png (+0 −0)
research/slim/nets/mobilenet/mnet_v1_vs_v2_pixel1_latency.png (+0 −0)
research/slim/nets/mobilenet/mobilenet.py (+1 −1)
official/utils/testing/scripts/presubmit.sh (new file, mode 100755)

```bash
#!/bin/bash
# Presubmit script that runs tests and lint under the local environment.
# Make sure that tensorflow and pylint are installed.
# usage: models >: ./official/utils/testing/scripts/presubmit.sh
# usage: models >: ./official/utils/testing/scripts/presubmit.sh lint py2_test py3_test
set +x

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/../../../.."
MODEL_ROOT="$(pwd)"

export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}"

cd official

lint() {
  local exit_code=0

  RC_FILE="utils/testing/pylint.rcfile"

  echo "===========Running lint test============"
  for file in `find . -name '*.py' ! -name '*test.py' -print`
  do
    echo "Linting ${file}"
    pylint --rcfile="${RC_FILE}" "${file}" || exit_code=$?
  done

  # More lenient for test files.
  for file in `find . -name '*test.py' -print`
  do
    echo "Linting ${file}"
    pylint --rcfile="${RC_FILE}" --disable=missing-docstring,protected-access "${file}" || exit_code=$?
  done

  return "${exit_code}"
}

py_test() {
  local PY_BINARY="$1"
  local exit_code=0

  echo "===========Running Python test============"

  for test_file in `find . -name '*test.py' -print`
  do
    echo "Testing ${test_file}"
    ${PY_BINARY} "${test_file}" || exit_code=$?
  done

  return "${exit_code}"
}

py2_test() {
  local PY_BINARY=$(which python2)
  return $(py_test "${PY_BINARY}")
}

py3_test() {
  local PY_BINARY=$(which python3)
  return $(py_test "${PY_BINARY}")
}

test_result=0

if [ "$#" -eq 0 ]; then
  TESTS="lint py2_test py3_test"
else
  TESTS="$@"
fi

for t in "${TESTS}"; do
  ${t} || test_result=$?
done

exit "${test_result}"
```
official/wide_deep/wide_deep.py

```diff
@@ -43,6 +43,9 @@ _NUM_EXAMPLES = {
 }
 
+LOSS_PREFIX = {'wide': 'linear/', 'deep': 'dnn/'}
+
+
 def build_model_columns():
   """Builds a set of wide and deep feature columns."""
   # Continuous columns
@@ -171,33 +174,38 @@ def input_fn(data_file, num_epochs, shuffle, batch_size):
   return dataset
 
 
-def main(_):
+def main(argv):
+  parser = WideDeepArgParser()
+  flags = parser.parse_args(args=argv[1:])
+
   # Clean up the model directory if present
-  shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
-  model = build_estimator(FLAGS.model_dir, FLAGS.model_type)
+  shutil.rmtree(flags.model_dir, ignore_errors=True)
+  model = build_estimator(flags.model_dir, flags.model_type)
 
-  train_file = os.path.join(FLAGS.data_dir, 'adult.data')
-  test_file = os.path.join(FLAGS.data_dir, 'adult.test')
+  train_file = os.path.join(flags.data_dir, 'adult.data')
+  test_file = os.path.join(flags.data_dir, 'adult.test')
 
-  # Train and evaluate the model every `FLAGS.epochs_per_eval` epochs.
+  # Train and evaluate the model every `flags.epochs_between_evals` epochs.
   def train_input_fn():
-    return input_fn(train_file, FLAGS.epochs_per_eval, True, FLAGS.batch_size)
+    return input_fn(
+        train_file, flags.epochs_between_evals, True, flags.batch_size)
 
   def eval_input_fn():
-    return input_fn(test_file, 1, False, FLAGS.batch_size)
+    return input_fn(test_file, 1, False, flags.batch_size)
 
+  loss_prefix = LOSS_PREFIX.get(flags.model_type, '')
   train_hooks = hooks_helper.get_train_hooks(
-      FLAGS.hooks, batch_size=FLAGS.batch_size,
-      tensors_to_log={'average_loss': 'head/truediv',
-                      'loss': 'head/weighted_loss/Sum'})
+      flags.hooks, batch_size=flags.batch_size,
+      tensors_to_log={'average_loss': loss_prefix + 'head/truediv',
+                      'loss': loss_prefix + 'head/weighted_loss/Sum'})
 
-  # Train and evaluate the model every `FLAGS.epochs_between_evals` epochs.
-  for n in range(FLAGS.train_epochs // FLAGS.epochs_between_evals):
+  # Train and evaluate the model every `flags.epochs_between_evals` epochs.
+  for n in range(flags.train_epochs // flags.epochs_between_evals):
     model.train(input_fn=train_input_fn, hooks=train_hooks)
     results = model.evaluate(input_fn=eval_input_fn)
 
     # Display evaluation metrics
-    print('Results at epoch', (n + 1) * FLAGS.epochs_between_evals)
+    print('Results at epoch', (n + 1) * flags.epochs_between_evals)
     print('-' * 60)
 
     for key in sorted(results):
@@ -224,6 +232,4 @@ class WideDeepArgParser(argparse.ArgumentParser):
 if __name__ == '__main__':
   tf.logging.set_verbosity(tf.logging.INFO)
-  parser = WideDeepArgParser()
-  FLAGS, unparsed = parser.parse_known_args()
-  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
+  main(argv=sys.argv)
```
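The new `LOSS_PREFIX` table exists because the wide (`linear/`) and deep (`dnn/`) variants place their loss tensors under different name scopes, while anything else falls back to unprefixed names; the logging hook therefore needs the right prefix prepended. A minimal sketch of that lookup, using only names that appear in the diff above (the estimator construction itself is not shown here):

```python
# Sketch of the loss-prefix lookup used for the logging hook's tensors_to_log.
LOSS_PREFIX = {'wide': 'linear/', 'deep': 'dnn/'}


def tensors_to_log_for(model_type):
    # Model types not in the table (e.g. 'wide_deep') get an empty prefix.
    loss_prefix = LOSS_PREFIX.get(model_type, '')
    return {'average_loss': loss_prefix + 'head/truediv',
            'loss': loss_prefix + 'head/weighted_loss/Sum'}


print(tensors_to_log_for('wide'))       # tensor names prefixed with 'linear/'
print(tensors_to_log_for('wide_deep'))  # unprefixed tensor names
```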
official/wide_deep/wide_deep_test.py

```diff
@@ -21,6 +21,7 @@ import os
 
 import tensorflow as tf  # pylint: disable=g-bad-import-order
 
+from official.utils.testing import integration
 from official.wide_deep import wide_deep
 
 tf.logging.set_verbosity(tf.logging.ERROR)
@@ -54,6 +55,14 @@ class BaseTest(tf.test.TestCase):
     with tf.gfile.Open(self.input_csv, 'w') as temp_csv:
       temp_csv.write(TEST_INPUT)
 
+    with tf.gfile.Open(TEST_CSV, "r") as temp_csv:
+      test_csv_contents = temp_csv.read()
+
+    # Used for end-to-end tests.
+    for fname in ['adult.data', 'adult.test']:
+      with tf.gfile.Open(os.path.join(self.temp_dir, fname), 'w') as test_csv:
+        test_csv.write(test_csv_contents)
+
   def test_input_fn(self):
     dataset = wide_deep.input_fn(self.input_csv, 1, False, 1)
     features, labels = dataset.make_one_shot_iterator().get_next()
@@ -107,6 +116,30 @@ class BaseTest(tf.test.TestCase):
   def test_wide_deep_estimator_training(self):
     self.build_and_test_estimator('wide_deep')
 
+  def test_end_to_end_wide(self):
+    integration.run_synthetic(
+        main=wide_deep.main, tmp_root=self.get_temp_dir(),
+        extra_flags=[
+            '--data_dir', self.get_temp_dir(),
+            '--model_type', 'wide',
+        ],
+        synth=False, max_train=None)
+
+  def test_end_to_end_deep(self):
+    integration.run_synthetic(
+        main=wide_deep.main, tmp_root=self.get_temp_dir(),
+        extra_flags=[
+            '--data_dir', self.get_temp_dir(),
+            '--model_type', 'deep',
+        ],
+        synth=False, max_train=None)
+
+  def test_end_to_end_wide_deep(self):
+    integration.run_synthetic(
+        main=wide_deep.main, tmp_root=self.get_temp_dir(),
+        extra_flags=[
+            '--data_dir', self.get_temp_dir(),
+            '--model_type', 'wide_deep',
+        ],
+        synth=False, max_train=None)
 
 if __name__ == '__main__':
   tf.test.main()
```
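The new end-to-end cases drive `wide_deep.main` through `integration.run_synthetic` with real flag strings, so they run like any other case in this file, e.g. `python official/wide_deep/wide_deep_test.py` from the repository root with the models directory on `PYTHONPATH` (which the presubmit script above arranges).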
research/deeplab/deeplab_demo.ipynb

```diff
@@ -31,10 +31,10 @@
     "#@title Imports\n",
     "\n",
     "import os\n",
-    "import StringIO\n",
+    "from io import BytesIO\n",
     "import tarfile\n",
     "import tempfile\n",
-    "import urllib\n",
+    "from six.moves import urllib\n",
     "\n",
     "from matplotlib import gridspec\n",
     "from matplotlib import pyplot as plt\n",
@@ -238,13 +238,13 @@
     "tf.gfile.MakeDirs(model_dir)\n",
     "\n",
     "download_path = os.path.join(model_dir, _TARBALL_NAME)\n",
-    "print 'downloading model, this might take a while...'\n",
-    "urllib.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],\n",
+    "print('downloading model, this might take a while...')\n",
+    "urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME],\n",
     "                   download_path)\n",
-    "print 'download completed! loading DeepLab model...'\n",
+    "print('download completed! loading DeepLab model...')\n",
     "\n",
     "MODEL = DeepLabModel(download_path)\n",
-    "print 'model loaded successfully!'"
+    "print('model loaded successfully!')"
    ]
   },
   {
@@ -292,14 +292,14 @@
     "def run_visualization(url):\n",
     "  \"\"\"Inferences DeepLab model and visualizes result.\"\"\"\n",
     "  try:\n",
-    "    f = urllib.urlopen(url)\n",
+    "    f = urllib.request.urlopen(url)\n",
     "    jpeg_str = f.read()\n",
-    "    orignal_im = Image.open(StringIO.StringIO(jpeg_str))\n",
+    "    orignal_im = Image.open(BytesIO(jpeg_str))\n",
     "  except IOError:\n",
-    "    print 'Cannot retrieve image. Please check url: ' + url\n",
+    "    print('Cannot retrieve image. Please check url: ' + url)\n",
     "    return\n",
     "\n",
-    "  print 'running deeplab on image %s...' % url\n",
+    "  print('running deeplab on image %s...' % url)\n",
     "  resized_im, seg_map = MODEL.run(orignal_im)\n",
     "\n",
     "  vis_segmentation(resized_im, seg_map)\n",
```
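The notebook changes follow the usual Python 2/3 compatibility recipe: `six.moves.urllib` instead of the Python 2 `urllib` module, `io.BytesIO` instead of `StringIO` for binary image data, and `print()` as a function. A standalone sketch of the same fetch-and-open pattern, assuming Pillow and six are installed (the URL below is a placeholder, not taken from the notebook):

```python
from io import BytesIO

from PIL import Image
from six.moves import urllib


def fetch_image(url):
    # six.moves.urllib.request resolves to urllib2 on Python 2 and
    # urllib.request on Python 3.
    f = urllib.request.urlopen(url)
    jpeg_str = f.read()
    # BytesIO wraps the raw bytes so PIL can treat them as a file-like object.
    return Image.open(BytesIO(jpeg_str))


# Example (placeholder URL, illustration only):
# im = fetch_image('https://example.com/some_image.jpg')
# print('image size: %s x %s' % im.size)
```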
research/deeplab/train.py

```diff
@@ -66,7 +66,7 @@ flags.DEFINE_integer('save_interval_secs', 1200,
 flags.DEFINE_integer('save_summaries_secs', 600,
                      'How often, in seconds, we compute the summaries.')
 
-# Settings for training strategry.
+# Settings for training strategy.
 
 flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
                   'Learning rate policy for training.')
```
research/deeplab/vis.py

```diff
@@ -173,7 +173,7 @@ def _process_batch(sess, original_images, semantic_predictions, image_names,
         colormap_type=FLAGS.colormap_type)
 
     if FLAGS.also_save_raw_predictions:
-      image_filename = image_names[i]
+      image_filename = os.path.basename(image_names[i])
 
       if train_id_to_eval_id is not None:
         crop_semantic_prediction = _convert_train_id_to_eval_id(
```
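The `os.path.basename` change strips any directory components from the dataset-provided image name before it is used for the saved raw prediction; previously a name such as the hypothetical one below would have been used verbatim.

```python
import os.path

# Hypothetical image name that includes a directory component.
image_name = 'some_city/some_image_leftImg8bit.png'

print(image_name)                    # some_city/some_image_leftImg8bit.png
print(os.path.basename(image_name))  # some_image_leftImg8bit.png
```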
research/slim/BUILD

```diff
@@ -205,6 +205,7 @@ py_library(
         ":nasnet",
         ":overfeat",
         ":pix2pix",
+        ":pnasnet",
         ":resnet_v1",
         ":resnet_v2",
         ":vgg",
@@ -533,6 +534,29 @@ py_test(
     ],
 )
 
+py_library(
+    name = "pnasnet",
+    srcs = ["nets/nasnet/pnasnet.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":nasnet",
+        ":nasnet_utils",
+        # "//tensorflow",
+    ],
+)
+
+py_test(
+    name = "pnasnet_test",
+    size = "large",
+    srcs = ["nets/nasnet/pnasnet_test.py"],
+    shard_count = 4,
+    srcs_version = "PY2AND3",
+    deps = [
+        ":pnasnet",
+        # "//tensorflow",
+    ],
+)
+
 py_library(
     name = "overfeat",
     srcs = ["nets/overfeat.py"],
```
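With these targets in place, the PNASNet library and its test should be runnable through Bazel from `research/slim` in the same way as the other targets here, e.g. `bazel test :pnasnet_test`, provided the commented-out `//tensorflow` dependency is satisfied by the local setup; that command is an assumption about the local Bazel workspace, not something stated in this commit.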
research/slim/README.md

```diff
@@ -259,9 +259,11 @@ Model | TF-Slim File | Checkpoint | Top-1 Accuracy| Top-5 Accuracy |
 [MobileNet_v1_1.0_224](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_1.0_224.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz)|70.9|89.9|
 [MobileNet_v1_0.50_160](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.50_160.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.5_160.tgz)|59.1|81.9|
 [MobileNet_v1_0.25_128](https://arxiv.org/pdf/1704.04861.pdf)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py)|[mobilenet_v1_0.25_128.tgz](http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz)|41.5|66.3|
-[MobileNet_v2_1.0_224^*](https://arxiv.org/abs/1801.04381)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py)|[Checkpoint TBA]()|72.2|91.0|
+[MobileNet_v2_1.4_224^*](https://arxiv.org/abs/1801.04381)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py)|[mobilenet_v2_1.4_224.tgz](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz)| 74.9 | 92.5|
+[MobileNet_v2_1.0_224^*](https://arxiv.org/abs/1801.04381)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_v2.py)|[mobilenet_v2_1.0_224.tgz](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz)| 71.9 | 91.0
 [NASNet-A_Mobile_224](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_mobile_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10_2017.tar.gz)|74.0|91.6|
 [NASNet-A_Large_331](https://arxiv.org/abs/1707.07012)#|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/nasnet.py)|[nasnet-a_large_04_10_2017.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz)|82.7|96.2|
 [PNASNet-5_Large_331](https://arxiv.org/abs/1712.00559)|[Code](https://github.com/tensorflow/models/blob/master/research/slim/nets/nasnet/pnasnet.py)|[pnasnet-5_large_2017_12_13.tar.gz](https://storage.googleapis.com/download.tensorflow.org/models/pnasnet-5_large_2017_12_13.tar.gz)|82.9|96.2|
 
 ^ ResNet V2 models use Inception pre-processing and input image size of 299 (use
 `--preprocessing_name inception --eval_image_size 299` when using
@@ -274,7 +276,7 @@ All 16 float MobileNet V1 models reported in the [MobileNet Paper](https://arxiv
 16 quantized [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/) compatible MobileNet V1 models can be found
 [here](https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet_v1.md).
 
-(^#) More details on Mobilenet V2 models can be found [here](nets/mobilenet/README.md).
+(^#) More details on MobileNetV2 models can be found [here](nets/mobilenet/README.md).
 
 (\*): Results quoted from the [paper](https://arxiv.org/abs/1603.05027).
```
research/slim/datasets/build_imagenet_data.py

```diff
@@ -93,9 +93,8 @@ import sys
 import threading
 
 import numpy as np
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
-from six.moves import xrange
 
 tf.app.flags.DEFINE_string('train_directory', '/tmp/',
```
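Several files in this commit collapse duplicated `xrange` imports into a single `six.moves` import with an explicit pylint suppression; the suppression is needed because the imported name shadows the Python 2 builtin of the same name, which pylint reports as `redefined-builtin`. A minimal sketch of the pattern, assuming `six` is installed:

```python
# Python 2/3 compatible range iterator; the trailing comment silences
# pylint's warning about shadowing the Python 2 builtin named xrange.
from six.moves import xrange  # pylint: disable=redefined-builtin

# Behaves like xrange on Python 2 and like range on Python 3.
squares = [i * i for i in xrange(5)]
print(squares)  # [0, 1, 4, 9, 16]
```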
research/slim/datasets/preprocess_imagenet_validation_data.py

```diff
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
+r"""Process the ImageNet Challenge bounding boxes for TensorFlow model training.
 
 Associate the ImageNet 2012 Challenge validation data set with labels.
 
@@ -51,7 +51,7 @@ from __future__ import print_function
 import os
 import sys
 
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 
 
 if __name__ == '__main__':
```
research/slim/datasets/process_bounding_boxes.py

```diff
@@ -85,9 +85,7 @@ import glob
 import os.path
 import sys
 import xml.etree.ElementTree as ET
-from six.moves import xrange
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 
 
 class BoundingBox(object):
```
research/slim/deployment/model_deploy.py

```diff
@@ -230,10 +230,11 @@ def _gather_clone_loss(clone, num_clones, regularization_losses):
       sum_loss = tf.add_n(all_losses)
   # Add the summaries out of the clone device block.
   if clone_loss is not None:
-    tf.summary.scalar(clone.scope + '/clone_loss', clone_loss, family='Losses')
+    tf.summary.scalar('/'.join(filter(None, ['Losses', clone.scope, 'clone_loss'])),
+                      clone_loss)
   if regularization_loss is not None:
-    tf.summary.scalar('regularization_loss', regularization_loss, family='Losses')
+    tf.summary.scalar('Losses/regularization_loss', regularization_loss)
   return sum_loss
```
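The rewritten summary names use `'/'.join(filter(None, [...]))` so that an empty `clone.scope` (presumably the single-clone case) does not leave a stray empty path component in the summary name. A small sketch of that behaviour, independent of TensorFlow:

```python
def summary_name(scope):
    # filter(None, ...) drops falsy components (such as an empty scope)
    # before joining the remaining pieces with '/'.
    return '/'.join(filter(None, ['Losses', scope, 'clone_loss']))


print(summary_name('clone_1'))  # Losses/clone_1/clone_loss
print(summary_name(''))         # Losses/clone_loss
```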
research/slim/nets/cyclegan.py

```diff
@@ -18,9 +18,8 @@ from __future__ import division
 from __future__ import print_function
 
 import numpy as np
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
-from six.moves import xrange
 
 layers = tf.contrib.layers
```
research/slim/nets/dcgan.py

```diff
@@ -19,10 +19,9 @@ from __future__ import print_function
 from math import log
 
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
-from six.moves import xrange
 
 slim = tf.contrib.slim
```
research/slim/nets/dcgan_test.py

```diff
@@ -18,9 +18,9 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-from six.moves import xrange
+from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf
-from six.moves import xrange
 
 from nets import dcgan
```
research/slim/nets/inception_v4_test.py

```diff
@@ -127,7 +127,7 @@ class InceptionTest(tf.test.TestCase):
                           'Mixed_6e', 'Mixed_6f', 'Mixed_6g', 'Mixed_6h',
                           'Mixed_7a', 'Mixed_7b', 'Mixed_7c', 'Mixed_7d']
     self.assertItemsEqual(end_points.keys(), expected_endpoints)
-    for name, op in end_points.iteritems():
+    for name, op in end_points.items():
       self.assertTrue(op.name.startswith('InceptionV4/' + name))
 
   def testBuildOnlyUpToFinalEndpoint(self):
```
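`dict.iteritems()` exists only on Python 2, while `dict.items()` works on both (a list on Python 2, a view on Python 3), which is why the test switches to it. A tiny illustration with placeholder values:

```python
end_points = {'Conv2d_1a_3x3': 'op_a', 'Mixed_3a': 'op_b'}

# Works on both Python 2 and 3; end_points.iteritems() would raise
# AttributeError on Python 3.
for name, op in sorted(end_points.items()):
    print(name, op)
```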
research/slim/nets/mobilenet/README.md

```diff
-# Mobilenet V2
-This folder contains building code for Mobilenet V2, based on
-[Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
+# MobileNetV2
+This folder contains building code for MobileNetV2, based on
+[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)
 
-# Pretrained model
-TODO
+# Performance
+
+## Latency
+This is the timing of [MobileNetV1](../mobilenet_v1.md) vs MobileNetV2 using
+TF-Lite on the large core of Pixel 1 phone.
+
+
+
+## MACs
+MACs, also sometimes known as MADDs - the number of multiply-accumulates needed
+to compute an inference on a single image is a common metric to measure the efficiency of the model.
+Below is the graph comparing V2 vs a few selected networks. The size
+of each blob represents the number of parameters. Note for [ShuffleNet](https://arxiv.org/abs/1707.01083) there
+are no published size numbers. We estimate it to be comparable to MobileNetV2 numbers.
+
+
+
+# Pretrained models
+## Imagenet Checkpoints
+
+Classification Checkpoint | MACs (M)| Parameters (M)| Top 1 Accuracy| Top 5 Accuracy | Mobile CPU (ms) Pixel 1
+---------------------------|---------|---------------|---------|----|-------------
+| [mobilenet_v2_1.4_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz) | 582 | 6.06 | 75.0 | 92.5 | 138.0
+| [mobilenet_v2_1.3_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.3_224.tgz) | 509 | 5.34 | 74.4 | 92.1 | 123.0
+| [mobilenet_v2_1.0_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz) | 300 | 3.47 | 71.8 | 91.0 | 73.8
+| [mobilenet_v2_1.0_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_192.tgz) | 221 | 3.47 | 70.7 | 90.1 | 55.1
+| [mobilenet_v2_1.0_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_160.tgz) | 154 | 3.47 | 68.8 | 89.0 | 40.2
+| [mobilenet_v2_1.0_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_128.tgz) | 99 | 3.47 | 65.3 | 86.9 | 27.6
+| [mobilenet_v2_1.0_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_96.tgz) | 56 | 3.47 | 60.3 | 83.2 | 17.6
+| [mobilenet_v2_0.75_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_224.tgz) | 209 | 2.61 | 69.8 | 89.6 | 55.8
+| [mobilenet_v2_0.75_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_192.tgz) | 153 | 2.61 | 68.7 | 88.9 | 41.6
+| [mobilenet_v2_0.75_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_160.tgz) | 107 | 2.61 | 66.4 | 87.3 | 30.4
+| [mobilenet_v2_0.75_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_128.tgz) | 69 | 2.61 | 63.2 | 85.3 | 21.9
+| [mobilenet_v2_0.75_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_96.tgz) | 39 | 2.61 | 58.8 | 81.6 | 14.2
+| [mobilenet_v2_0.5_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_224.tgz) | 97 | 1.95 | 65.4 | 86.4 | 28.7
+| [mobilenet_v2_0.5_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_192.tgz) | 71 | 1.95 | 63.9 | 85.4 | 21.1
+| [mobilenet_v2_0.5_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_160.tgz) | 50 | 1.95 | 61.0 | 83.2 | 14.9
+| [mobilenet_v2_0.5_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_128.tgz) | 32 | 1.95 | 57.7 | 80.8 | 9.9
+| [mobilenet_v2_0.5_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_96.tgz) | 18 | 1.95 | 51.2 | 75.8 | 6.4
+| [mobilenet_v2_0.35_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_224.tgz) | 59 | 1.66 | 60.3 | 82.9 | 19.7
+| [mobilenet_v2_0.35_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_192.tgz) | 43 | 1.66 | 58.2 | 81.2 | 14.6
+| [mobilenet_v2_0.35_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_160.tgz) | 30 | 1.66 | 55.7 | 79.1 | 10.5
+| [mobilenet_v2_0.35_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_128.tgz) | 20 | 1.66 | 50.8 | 75.0 | 6.9
+| [mobilenet_v2_0.35_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_96.tgz) | 11 | 1.66 | 45.5 | 70.4 | 4.5
 
 # Example
-TODO
+See this [ipython notebook](mobilenet_example.ipynb) or open and run the network directly in [Colaboratory](https://colab.research.google.com/github/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_example.ipynb).
```
research/slim/nets/mobilenet/madds_top1_accuracy.png (new file, mode 100644, 98.6 KB)

research/slim/nets/mobilenet/mnet_v1_vs_v2_pixel1_latency.png (new file, mode 100644, 61.7 KB)
research/slim/nets/mobilenet/mobilenet.py

```diff
@@ -81,7 +81,7 @@ def _set_arg_scope_defaults(defaults):
     context manager where all defaults are set.
   """
   if hasattr(defaults, 'items'):
-    items = defaults.items()
+    items = list(defaults.items())
   else:
     items = defaults
   if not items:
```
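On Python 3, `dict.items()` returns a view rather than a list, so wrapping it in `list()` matters whenever the result needs to be a real sequence or must not track later changes to the dict. A small illustration with placeholder keys:

```python
defaults = {'padding': 'SAME', 'activation': 'relu'}

view = defaults.items()          # a dynamic view on Python 3, not a list
items = list(defaults.items())   # materialized snapshot, safe to reuse

defaults['padding'] = 'VALID'
print(items)        # snapshot taken before the change
print(list(view))   # the view reflects the updated dict
```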