ModelZoo / ResNet50_tensorflow · Commit 43178d7f
"...git@developer.sourcefind.cn:dadigang/Ventoy.git" did not exist on "471432fc50ffad80bde5de0b22e4c30fa3aac41b"
Unverified commit 43178d7f, authored Mar 04, 2020 by Ayushman Kumar; committed by GitHub on Mar 04, 2020.

Merge pull request #1 from tensorflow/master

Updated

Parents: 8b47aa3d, 75d13042
Showing 20 changed files with 138 additions and 715 deletions.
research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py (+10, -3)
research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py (+42, -1)
research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py (+10, -3)
research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py (+49, -9)
research/lstm_object_detection/models/mobilenet_defs.py (+1, -1)
research/lstm_object_detection/models/mobilenet_defs_test.py (+1, -1)
research/lstm_object_detection/test_tflite_model.py (+1, -1)
research/lstm_object_detection/tflite/BUILD (+6, -2)
research/lstm_object_detection/tflite/WORKSPACE (+12, -0)
research/lstm_object_detection/train.py (+1, -1)
research/lstm_object_detection/trainer.py (+3, -2)
research/lstm_object_detection/utils/config_util.py (+1, -1)
research/lstm_object_detection/utils/config_util_test.py (+1, -1)
samples/cookbook/regression/__init__.py (+0, -20)
samples/cookbook/regression/automobile_data.py (+0, -127)
samples/cookbook/regression/custom_regression.py (+0, -163)
samples/cookbook/regression/dnn_regression.py (+0, -97)
samples/cookbook/regression/linear_regression.py (+0, -103)
samples/cookbook/regression/linear_regression_categorical.py (+0, -106)
samples/cookbook/regression/regression_test.py (+0, -73)
research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor.py
@@ -15,7 +15,7 @@
 """LSTDInterleavedFeatureExtractor which interleaves multiple MobileNet V2."""

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim
 from tensorflow.python.framework import ops as tf_ops

@@ -64,8 +64,15 @@ class LSTMSSDInterleavedMobilenetV2FeatureExtractor(
         `conv_hyperparams_fn`.
     """
     super(LSTMSSDInterleavedMobilenetV2FeatureExtractor, self).__init__(
-        is_training, depth_multiplier, min_depth, pad_to_multiple,
-        conv_hyperparams_fn, reuse_weights, use_explicit_padding,
-        use_depthwise, override_base_feature_extractor_hyperparams)
+        is_training=is_training,
+        depth_multiplier=depth_multiplier,
+        min_depth=min_depth,
+        pad_to_multiple=pad_to_multiple,
+        conv_hyperparams_fn=conv_hyperparams_fn,
+        reuse_weights=reuse_weights,
+        use_explicit_padding=use_explicit_padding,
+        use_depthwise=use_depthwise,
+        override_base_feature_extractor_hyperparams=(
+            override_base_feature_extractor_hyperparams))
     # RANDOM_SKIP_SMALL means the training policy is random and the small model
     # does not update state during training.
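Besides the tensorflow.compat.v1 import, the hunk above replaces positional arguments with explicit keyword arguments in the super().__init__ call. A minimal sketch of why that matters, using an illustrative stand-in class rather than the repository's real base-class API:

class FeatureExtractor(object):
  """Illustrative stand-in for the real base class (not the repo's API)."""

  def __init__(self, is_training, depth_multiplier, min_depth,
               pad_to_multiple, conv_hyperparams_fn):
    self._is_training = is_training
    self._depth_multiplier = depth_multiplier
    self._min_depth = min_depth
    self._pad_to_multiple = pad_to_multiple
    self._conv_hyperparams_fn = conv_hyperparams_fn

# A positional call silently mis-assigns values if the base class ever
# reorders or inserts a parameter; the keyword form either stays correct
# or fails loudly with a TypeError, which is the behavior this commit
# opts into.
extractor = FeatureExtractor(
    is_training=True,
    depth_multiplier=1.0,
    min_depth=16,
    pad_to_multiple=1,
    conv_hyperparams_fn=None)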
research/lstm_object_detection/models/lstm_ssd_interleaved_mobilenet_v2_feature_extractor_test.py
@@ -17,7 +17,7 @@
 import itertools

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim
 from tensorflow.contrib import training as contrib_training

@@ -60,6 +60,47 @@ class LSTMSSDInterleavedMobilenetV2FeatureExtractorTest(
     feature_extractor.is_quantized = is_quantized
     return feature_extractor

+  def test_feature_extractor_construct_with_expected_params(self):
+    def conv_hyperparams_fn():
+      with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and
+            slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc:
+        return sc
+
+    params = {
+        'is_training': True,
+        'depth_multiplier': .55,
+        'min_depth': 9,
+        'pad_to_multiple': 3,
+        'conv_hyperparams_fn': conv_hyperparams_fn,
+        'reuse_weights': False,
+        'use_explicit_padding': True,
+        'use_depthwise': False,
+        'override_base_feature_extractor_hyperparams': True}
+
+    feature_extractor = (
+        lstm_ssd_interleaved_mobilenet_v2_feature_extractor.
+        LSTMSSDInterleavedMobilenetV2FeatureExtractor(**params))
+
+    self.assertEqual(params['is_training'], feature_extractor._is_training)
+    self.assertEqual(params['depth_multiplier'],
+                     feature_extractor._depth_multiplier)
+    self.assertEqual(params['min_depth'], feature_extractor._min_depth)
+    self.assertEqual(params['pad_to_multiple'],
+                     feature_extractor._pad_to_multiple)
+    self.assertEqual(params['conv_hyperparams_fn'],
+                     feature_extractor._conv_hyperparams_fn)
+    self.assertEqual(params['reuse_weights'], feature_extractor._reuse_weights)
+    self.assertEqual(params['use_explicit_padding'],
+                     feature_extractor._use_explicit_padding)
+    self.assertEqual(params['use_depthwise'], feature_extractor._use_depthwise)
+    self.assertEqual(params['override_base_feature_extractor_hyperparams'],
+                     (feature_extractor.
+                      _override_base_feature_extractor_hyperparams))
+
   def test_extract_features_returns_correct_shapes_128(self):
     image_height = 128
     image_width = 128
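One caveat about the new test's helper: with (a and b) as sc: evaluates a boolean `and`, so only the second slim.arg_scope is actually entered and the first scope's overrides are dropped. A sketch of the nested form that applies both scopes (standard Python, not a change this commit makes):

from tensorflow.contrib import slim  # TF 1.x only

def conv_hyperparams_fn():
  # Nesting enters both scopes, so both sets of argument overrides apply;
  # the `with (a and b) as sc:` form above only enters the second scope.
  with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
    with slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3) as sc:
      return sc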
research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor.py
@@ -15,7 +15,7 @@
 """LSTMSSDFeatureExtractor for MobilenetV1 features."""

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 from tensorflow.python.framework import ops as tf_ops
 from lstm_object_detection.lstm import lstm_cells

@@ -66,8 +66,15 @@ class LSTMSSDMobileNetV1FeatureExtractor(
       lstm_state_depth: An integer of the depth of the lstm state.
     """
     super(LSTMSSDMobileNetV1FeatureExtractor, self).__init__(
-        is_training, depth_multiplier, min_depth, pad_to_multiple,
-        conv_hyperparams_fn, reuse_weights, use_explicit_padding,
-        use_depthwise, override_base_feature_extractor_hyperparams)
+        is_training=is_training,
+        depth_multiplier=depth_multiplier,
+        min_depth=min_depth,
+        pad_to_multiple=pad_to_multiple,
+        conv_hyperparams_fn=conv_hyperparams_fn,
+        reuse_weights=reuse_weights,
+        use_explicit_padding=use_explicit_padding,
+        use_depthwise=use_depthwise,
+        override_base_feature_extractor_hyperparams=(
+            override_base_feature_extractor_hyperparams))
     self._feature_map_layout = {
         'from_layer': ['Conv2d_13_pointwise_lstm', '', '', '', ''],
research/lstm_object_detection/models/lstm_ssd_mobilenet_v1_feature_extractor_test.py
@@ -16,11 +16,11 @@
 """Tests for models.lstm_ssd_mobilenet_v1_feature_extractor."""

 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 from tensorflow.contrib import training as contrib_training
-from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extactor
+from lstm_object_detection.models import lstm_ssd_mobilenet_v1_feature_extractor as feature_extractor
 from object_detection.models import ssd_feature_extractor_test

 slim = contrib_slim

@@ -48,7 +48,7 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     """
     min_depth = 32
     extractor = (
-        feature_extactor.LSTMSSDMobileNetV1FeatureExtractor(
+        feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(
            is_training,
            depth_multiplier,
            min_depth,

@@ -58,6 +58,46 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     extractor.lstm_state_depth = int(256 * depth_multiplier)
     return extractor

+  def test_feature_extractor_construct_with_expected_params(self):
+    def conv_hyperparams_fn():
+      with (slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm) and
+            slim.arg_scope([slim.batch_norm], decay=0.97, epsilon=1e-3)) as sc:
+        return sc
+
+    params = {
+        'is_training': True,
+        'depth_multiplier': .55,
+        'min_depth': 9,
+        'pad_to_multiple': 3,
+        'conv_hyperparams_fn': conv_hyperparams_fn,
+        'reuse_weights': False,
+        'use_explicit_padding': True,
+        'use_depthwise': False,
+        'override_base_feature_extractor_hyperparams': True}
+
+    extractor = (
+        feature_extractor.LSTMSSDMobileNetV1FeatureExtractor(**params))
+
+    self.assertEqual(params['is_training'], extractor._is_training)
+    self.assertEqual(params['depth_multiplier'], extractor._depth_multiplier)
+    self.assertEqual(params['min_depth'], extractor._min_depth)
+    self.assertEqual(params['pad_to_multiple'], extractor._pad_to_multiple)
+    self.assertEqual(params['conv_hyperparams_fn'],
+                     extractor._conv_hyperparams_fn)
+    self.assertEqual(params['reuse_weights'], extractor._reuse_weights)
+    self.assertEqual(params['use_explicit_padding'],
+                     extractor._use_explicit_padding)
+    self.assertEqual(params['use_depthwise'], extractor._use_depthwise)
+    self.assertEqual(params['override_base_feature_extractor_hyperparams'],
+                     (extractor._override_base_feature_extractor_hyperparams))
+
   def test_extract_features_returns_correct_shapes_256(self):
     image_height = 256
     image_width = 256

@@ -87,8 +127,8 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
   def test_preprocess_returns_correct_value_range(self):
     test_image = np.random.rand(5, 128, 128, 3)
-    feature_extractor = self._create_feature_extractor()
-    preprocessed_image = feature_extractor.preprocess(test_image)
+    extractor = self._create_feature_extractor()
+    preprocessed_image = extractor.preprocess(test_image)
     self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))

   def test_variables_only_created_in_scope(self):

@@ -96,8 +136,8 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
     g = tf.Graph()
     with g.as_default():
       preprocessed_inputs = tf.placeholder(tf.float32, (5, 256, 256, 3))
-      feature_extractor = self._create_feature_extractor()
-      feature_extractor.extract_features(preprocessed_inputs)
+      extractor = self._create_feature_extractor()
+      extractor.extract_features(preprocessed_inputs)
     variables = g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
     find_scope = False
     for variable in variables:

@@ -122,10 +162,10 @@ class LstmSsdMobilenetV1FeatureExtractorTest(
         input_context={},
         initial_states=init_state,
         capacity=1)
-    feature_extractor = self._create_feature_extractor()
+    extractor = self._create_feature_extractor()
     image = tf.random_uniform([5, 256, 256, 3])
     with tf.variable_scope('zero_state'):
-      feature_map = feature_extractor.extract_features(
+      feature_map = extractor.extract_features(
           image, stateful_reader.next_batch)
     with tf.Session() as sess:
       sess.run(tf.global_variables_initializer())
research/lstm_object_detection/models/mobilenet_defs.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 """Definitions for modified MobileNet models used in LSTD."""

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from tensorflow.contrib import slim as contrib_slim
 from nets import mobilenet_v1
research/lstm_object_detection/models/mobilenet_defs_test.py
@@ -18,7 +18,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from lstm_object_detection.models import mobilenet_defs
 from nets import mobilenet_v1
 from nets.mobilenet import mobilenet_v2
research/lstm_object_detection/test_tflite_model.py
@@ -18,7 +18,7 @@
 from __future__ import print_function
 from absl import flags
 import numpy as np
-import tensorflow as tf
+import tensorflow.compat.v1 as tf

 flags.DEFINE_string('model_path', None, 'Path to model.')
 FLAGS = flags.FLAGS
research/lstm_object_detection/tflite/BUILD
@@ -50,7 +50,9 @@ cc_library(
         "//utils:ssd_utils",
     ] + select({
         "//conditions:default": [],
-        "enable_edgetpu": ["@libedgetpu//libedgetpu:header"],
+        "enable_edgetpu": [
+            "@libedgetpu//libedgetpu:header",
+        ],
     }),
     alwayslink = 1,
 )

@@ -71,7 +73,9 @@ cc_library(
         "@org_tensorflow//tensorflow/lite/kernels:builtin_ops",
     ] + select({
         "//conditions:default": [],
-        "enable_edgetpu": ["@libedgetpu//libedgetpu:header"],
+        "enable_edgetpu": [
+            "@libedgetpu//libedgetpu:header",
+        ],
     }),
     alwayslink = 1,
 )
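Both hunks only reformat the Edge TPU dependency list; the behavior still hinges on the "enable_edgetpu" label passed to select(). A minimal sketch of the config_setting such a label usually points at (the flag name and wiring here are assumptions, not taken from this repository):

config_setting(
    name = "enable_edgetpu",
    # Matches builds invoked with: bazel build --define enable_edgetpu=true
    define_values = {"enable_edgetpu": "true"},
)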
research/lstm_object_detection/tflite/WORKSPACE
@@ -22,6 +22,12 @@ http_archive(
     strip_prefix = "abseil-cpp-a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a",
 )

+http_archive(
+    name = "rules_cc",
+    strip_prefix = "rules_cc-master",
+    urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"],
+)
+
 # GoogleTest/GoogleMock framework. Used by most unit-tests.
 http_archive(
     name = "com_google_googletest",

@@ -90,6 +96,12 @@ http_archive(
     sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc",
 )

+#
+# http_archive(
+#     name = "com_google_protobuf",
+#     strip_prefix = "protobuf-master",
+#     urls = ["https://github.com/protocolbuffers/protobuf/archive/master.zip"],
+# )
+
 # Needed by TensorFlow
 http_archive(
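Note that the new rules_cc fetch tracks the moving master branch, unlike the sha-pinned archives around it, so the build is not reproducible. A pinned fetch generally looks like the sketch below; the commit hash and checksum are placeholders, not real values:

http_archive(
    name = "rules_cc",
    strip_prefix = "rules_cc-<COMMIT>",  # placeholder commit hash
    urls = ["https://github.com/bazelbuild/rules_cc/archive/<COMMIT>.zip"],
    sha256 = "<sha256 of the downloaded archive>",  # placeholder checksum
)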
research/lstm_object_detection/train.py
@@ -46,7 +46,7 @@ import functools
 import json
 import os
 from absl import flags
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from lstm_object_detection import model_builder
 from lstm_object_detection import trainer
 from lstm_object_detection.inputs import seq_dataset_builder
research/lstm_object_detection/trainer.py
@@ -20,7 +20,8 @@ DetectionModel.
 """

 import functools
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
+from tensorflow.contrib import slim as contrib_slim
 from object_detection.builders import optimizer_builder
 from object_detection.core import standard_fields as fields

@@ -28,7 +29,7 @@ from object_detection.utils import ops as util_ops
 from object_detection.utils import variables_helper
 from deployment import model_deploy

-slim = tf.contrib.slim
+slim = contrib_slim

 def create_input_queue(create_tensor_dict_fn):
research/lstm_object_detection/utils/config_util.py
@@ -19,7 +19,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from google.protobuf import text_format
 from lstm_object_detection.protos import input_reader_google_pb2  # pylint: disable=unused-import
research/lstm_object_detection/utils/config_util_test.py
@@ -20,7 +20,7 @@ from __future__ import division
 from __future__ import print_function

 import os
-import tensorflow as tf
+import tensorflow.compat.v1 as tf
 from google.protobuf import text_format
 from lstm_object_detection.protos import pipeline_pb2 as internal_pipeline_pb2
samples/cookbook/regression/__init__.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A collection of regression examples using `Estimators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
samples/cookbook/regression/automobile_data.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for loading the automobile data set."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections

import numpy as np
import pandas as pd
import tensorflow as tf

URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"

# Order is important for the csv-readers, so we use an OrderedDict here.
COLUMN_TYPES = collections.OrderedDict([
    ("symboling", int),
    ("normalized-losses", float),
    ("make", str),
    ("fuel-type", str),
    ("aspiration", str),
    ("num-of-doors", str),
    ("body-style", str),
    ("drive-wheels", str),
    ("engine-location", str),
    ("wheel-base", float),
    ("length", float),
    ("width", float),
    ("height", float),
    ("curb-weight", float),
    ("engine-type", str),
    ("num-of-cylinders", str),
    ("engine-size", float),
    ("fuel-system", str),
    ("bore", float),
    ("stroke", float),
    ("compression-ratio", float),
    ("horsepower", float),
    ("peak-rpm", float),
    ("city-mpg", float),
    ("highway-mpg", float),
    ("price", float)
])


def raw_dataframe():
  """Load the automobile data set as a pd.DataFrame."""
  # Download and cache the data.
  path = tf.keras.utils.get_file(URL.split("/")[-1], URL)

  # Load it into a pandas DataFrame.
  df = pd.read_csv(path, names=COLUMN_TYPES.keys(),
                   dtype=COLUMN_TYPES, na_values="?")

  return df


def load_data(y_name="price", train_fraction=0.7, seed=None):
  """Load the automobile data set and split it train/test and features/label.

  A description of the data is available at:
    https://archive.ics.uci.edu/ml/datasets/automobile

  The data itself can be found at:
    https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data

  Args:
    y_name: the column to return as the label.
    train_fraction: the fraction of the data set to use for training.
    seed: The random seed to use when shuffling the data. `None` generates a
      unique shuffle every run.

  Returns:
    a pair of pairs where the first pair is the training data, and the second
    is the test data:
    `(x_train, y_train), (x_test, y_test) = load_data(...)`
    `x` contains a pandas DataFrame of features, while `y` contains the label
    array.
  """
  # Load the raw data columns.
  data = raw_dataframe()

  # Delete rows with unknowns.
  data = data.dropna()

  # Shuffle the data.
  np.random.seed(seed)

  # Split the data into train/test subsets.
  x_train = data.sample(frac=train_fraction, random_state=seed)
  x_test = data.drop(x_train.index)

  # Extract the label from the features DataFrame.
  y_train = x_train.pop(y_name)
  y_test = x_test.pop(y_name)

  return (x_train, y_train), (x_test, y_test)


def make_dataset(batch_sz, x, y=None, shuffle=False, shuffle_buffer_size=1000):
  """Create a slice Dataset from a pandas DataFrame and labels."""

  def input_fn():
    if y is not None:
      dataset = tf.data.Dataset.from_tensor_slices((dict(x), y))
    else:
      dataset = tf.data.Dataset.from_tensor_slices(dict(x))
    if shuffle:
      dataset = dataset.shuffle(shuffle_buffer_size).batch(batch_sz).repeat()
    else:
      dataset = dataset.batch(batch_sz)
    return dataset

  return input_fn
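For reference, this is how the sibling scripts in the directory wire these helpers together; a minimal sketch using the TF 1.x Estimator API, with illustrative hyperparameters:

import tensorflow as tf

import automobile_data

(train_x, train_y), (test_x, test_y) = automobile_data.load_data()

# make_dataset returns an input_fn closure, not a Dataset; the Estimator
# calls it to build the input pipeline inside its own graph.
train_input_fn = automobile_data.make_dataset(
    100, train_x, train_y, shuffle=True)
model = tf.estimator.LinearRegressor(
    feature_columns=[tf.feature_column.numeric_column(key="curb-weight"),
                     tf.feature_column.numeric_column(key="highway-mpg")])
model.train(input_fn=train_input_fn, steps=100)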
samples/cookbook/regression/custom_regression.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression using the DNNRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

import tensorflow as tf

import automobile_data

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')
parser.add_argument('--price_norm_factor', default=1000., type=float,
                    help='price normalization factor')


def my_dnn_regression_fn(features, labels, mode, params):
  """A model function implementing DNN regression for a custom Estimator."""

  # Extract the input into a dense layer, according to the feature_columns.
  top = tf.feature_column.input_layer(features, params["feature_columns"])

  # Iterate over the "hidden_units" list of layer sizes, default is [20].
  for units in params.get("hidden_units", [20]):
    # Add a hidden layer, densely connected on top of the previous layer.
    top = tf.layers.dense(inputs=top, units=units, activation=tf.nn.relu)

  # Connect a linear output layer on top.
  output_layer = tf.layers.dense(inputs=top, units=1)

  # Reshape the output layer to a 1-dim Tensor to return predictions.
  predictions = tf.squeeze(output_layer, 1)

  if mode == tf.estimator.ModeKeys.PREDICT:
    # In `PREDICT` mode we only need to return predictions.
    return tf.estimator.EstimatorSpec(
        mode=mode, predictions={"price": predictions})

  # Calculate loss using mean squared error.
  average_loss = tf.losses.mean_squared_error(labels, predictions)

  # Pre-made estimators use the total_loss instead of the average,
  # so report total_loss for compatibility.
  batch_size = tf.shape(labels)[0]
  total_loss = tf.to_float(batch_size) * average_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = params.get("optimizer", tf.train.AdamOptimizer)
    optimizer = optimizer(params.get("learning_rate", None))
    train_op = optimizer.minimize(
        loss=average_loss, global_step=tf.train.get_global_step())

    return tf.estimator.EstimatorSpec(
        mode=mode, loss=total_loss, train_op=train_op)

  # In evaluation mode we will calculate evaluation metrics.
  assert mode == tf.estimator.ModeKeys.EVAL

  # Calculate root mean squared error.
  print(labels)
  print(predictions)
  # Fixed for #4083
  predictions = tf.cast(predictions, tf.float64)
  rmse = tf.metrics.root_mean_squared_error(labels, predictions)

  # Add the rmse to the collection of evaluation metrics.
  eval_metrics = {"rmse": rmse}

  return tf.estimator.EstimatorSpec(
      mode=mode,
      # Report sum of error for compatibility with pre-made estimators.
      loss=total_loss,
      eval_metric_ops=eval_metrics)


def main(argv):
  """Builds, trains, and evaluates the model."""
  args = parser.parse_args(argv[1:])

  (train_x, train_y), (test_x, test_y) = automobile_data.load_data()

  train_y /= args.price_norm_factor
  test_y /= args.price_norm_factor

  # Provide the training input dataset.
  train_input_fn = automobile_data.make_dataset(
      args.batch_size, train_x, train_y, True, 1000)

  # Build the validation dataset.
  test_input_fn = automobile_data.make_dataset(
      args.batch_size, test_x, test_y)

  # The first way assigns a unique weight to each category. To do this you must
  # specify the category's vocabulary (values outside this specification will
  # receive a weight of zero). Here we specify the vocabulary using a list of
  # options. The vocabulary can also be specified with a vocabulary file (using
  # `categorical_column_with_vocabulary_file`). For features covering a
  # range of positive integers use `categorical_column_with_identity`.
  body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
  body_style = tf.feature_column.categorical_column_with_vocabulary_list(
      key="body-style", vocabulary_list=body_style_vocab)
  make = tf.feature_column.categorical_column_with_hash_bucket(
      key="make", hash_bucket_size=50)

  feature_columns = [
      tf.feature_column.numeric_column(key="curb-weight"),
      tf.feature_column.numeric_column(key="highway-mpg"),
      # Since this is a DNN model, convert categorical columns from sparse
      # to dense. Wrap them in an `indicator_column` to create a
      # one-hot vector from the input.
      tf.feature_column.indicator_column(body_style),
      # Or use an `embedding_column` to create a trainable vector for each
      # index.
      tf.feature_column.embedding_column(make, dimension=3),
  ]

  # Build a custom Estimator, using the model_fn.
  # `params` is passed through to the `model_fn`.
  model = tf.estimator.Estimator(
      model_fn=my_dnn_regression_fn,
      params={
          "feature_columns": feature_columns,
          "learning_rate": 0.001,
          "optimizer": tf.train.AdamOptimizer,
          "hidden_units": [20, 20]
      })

  # Train the model.
  model.train(input_fn=train_input_fn, steps=args.train_steps)

  # Evaluate how the model performs on data it has not yet seen.
  eval_result = model.evaluate(input_fn=test_input_fn)

  # Print the Root Mean Square Error (RMSE).
  print("\n" + 80 * "*")
  print("\nRMS error for the test set: ${:.0f}"
        .format(args.price_norm_factor * eval_result["rmse"]))

  print()


if __name__ == "__main__":
  # The Estimator periodically generates "INFO" logs; make these logs visible.
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main=main)
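The model_fn above follows the standard custom-Estimator contract: one function handles all three modes, each returning an EstimatorSpec with mode-appropriate fields. A stripped-down sketch of that contract (illustrative only, TF 1.x API; not part of the deleted file):

import tensorflow as tf

def minimal_model_fn(features, labels, mode, params):
  # A single learned bias is enough to satisfy the Estimator contract.
  bias = tf.get_variable("bias", shape=[], dtype=tf.float32)
  predictions = tf.zeros_like(features["x"]) + bias

  if mode == tf.estimator.ModeKeys.PREDICT:
    # PREDICT only needs predictions.
    return tf.estimator.EstimatorSpec(mode, predictions={"y": predictions})

  loss = tf.losses.mean_squared_error(labels, predictions)
  if mode == tf.estimator.ModeKeys.TRAIN:
    # TRAIN needs a loss and a train_op that advances the global step.
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  # EVAL needs a loss; eval_metric_ops are optional.
  return tf.estimator.EstimatorSpec(mode, loss=loss)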
samples/cookbook/regression/dnn_regression.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression using the DNNRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

import tensorflow as tf

import automobile_data

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=5000, type=int,
                    help='number of training steps')
parser.add_argument('--price_norm_factor', default=1000., type=float,
                    help='price normalization factor')


def main(argv):
  """Builds, trains, and evaluates the model."""
  args = parser.parse_args(argv[1:])

  (train_x, train_y), (test_x, test_y) = automobile_data.load_data()

  train_y /= args.price_norm_factor
  test_y /= args.price_norm_factor

  # Provide the training input dataset.
  train_input_fn = automobile_data.make_dataset(
      args.batch_size, train_x, train_y, True, 1000)

  # Provide the validation input dataset.
  test_input_fn = automobile_data.make_dataset(
      args.batch_size, test_x, test_y)

  # Use the same categorical columns as in `linear_regression_categorical`.
  body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
  body_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
      key="body-style", vocabulary_list=body_style_vocab)
  make_column = tf.feature_column.categorical_column_with_hash_bucket(
      key="make", hash_bucket_size=50)

  feature_columns = [
      tf.feature_column.numeric_column(key="curb-weight"),
      tf.feature_column.numeric_column(key="highway-mpg"),
      # Since this is a DNN model, categorical columns must be converted from
      # sparse to dense. Wrap them in an `indicator_column` to create a
      # one-hot vector from the input.
      tf.feature_column.indicator_column(body_style_column),
      # Or use an `embedding_column` to create a trainable vector for each
      # index.
      tf.feature_column.embedding_column(make_column, dimension=3),
  ]

  # Build a DNNRegressor, with 2x20-unit hidden layers, with the feature
  # columns defined above as input.
  model = tf.estimator.DNNRegressor(
      hidden_units=[20, 20], feature_columns=feature_columns)

  # Train the model.
  # By default, the Estimators log output every 100 steps.
  model.train(input_fn=train_input_fn, steps=args.train_steps)

  # Evaluate how the model performs on data it has not yet seen.
  eval_result = model.evaluate(input_fn=test_input_fn)

  # The evaluation returns a Python dictionary. The "average_loss" key holds
  # the Mean Squared Error (MSE).
  average_loss = eval_result["average_loss"]

  # Convert MSE to Root Mean Square Error (RMSE).
  print("\n" + 80 * "*")
  print("\nRMS error for the test set: ${:.0f}"
        .format(args.price_norm_factor * average_loss**0.5))

  print()


if __name__ == "__main__":
  # The Estimator periodically generates "INFO" logs; make these logs visible.
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main=main)
samples/cookbook/regression/linear_regression.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression using the LinearRegressor Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

import numpy as np
import tensorflow as tf
from absl import app

import automobile_data

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')
parser.add_argument('--price_norm_factor', default=1000., type=float,
                    help='price normalization factor')


def main(argv):
  """Builds, trains, and evaluates the model."""
  args = parser.parse_args(argv[1:])

  (train_x, train_y), (test_x, test_y) = automobile_data.load_data()

  train_y /= args.price_norm_factor
  test_y /= args.price_norm_factor

  # Provide the training input dataset.
  train_input_fn = automobile_data.make_dataset(
      args.batch_size, train_x, train_y, True, 1000)

  # Provide the validation input dataset.
  test_input_fn = automobile_data.make_dataset(
      args.batch_size, test_x, test_y)

  feature_columns = [
      # "curb-weight" and "highway-mpg" are numeric columns.
      tf.feature_column.numeric_column(key="curb-weight"),
      tf.feature_column.numeric_column(key="highway-mpg"),
  ]

  # Build the Estimator.
  model = tf.estimator.LinearRegressor(feature_columns=feature_columns)

  # Train the model.
  # By default, the Estimators log output every 100 steps.
  model.train(input_fn=train_input_fn, steps=args.train_steps)

  # Evaluate how the model performs on data it has not yet seen.
  eval_result = model.evaluate(input_fn=test_input_fn)

  # The evaluation returns a Python dictionary. The "average_loss" key holds
  # the Mean Squared Error (MSE).
  average_loss = eval_result["average_loss"]

  # Convert MSE to Root Mean Square Error (RMSE).
  print("\n" + 80 * "*")
  print("\nRMS error for the test set: ${:.0f}"
        .format(args.price_norm_factor * average_loss**0.5))

  # Run the model in prediction mode.
  input_dict = {
      "curb-weight": np.array([2000, 3000]),
      "highway-mpg": np.array([30, 40])
  }
  # Provide the predict input dataset.
  predict_input_fn = automobile_data.make_dataset(1, input_dict)
  predict_results = model.predict(input_fn=predict_input_fn)

  # Print the prediction results.
  print("\nPrediction results:")
  for i, prediction in enumerate(predict_results):
    msg = ("Curb weight: {: 4d}lbs, "
           "Highway: {: 0d}mpg, "
           "Prediction: ${: 9.2f}")
    msg = msg.format(input_dict["curb-weight"][i],
                     input_dict["highway-mpg"][i],
                     args.price_norm_factor * prediction["predictions"][0])

    print(" " + msg)
  print()


if __name__ == "__main__":
  # The Estimator periodically generates "INFO" logs; make these logs visible.
  tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
  app.run(main=main)
samples/cookbook/regression/linear_regression_categorical.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear regression with categorical features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

import tensorflow as tf

import automobile_data

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')
parser.add_argument('--price_norm_factor', default=1000., type=float,
                    help='price normalization factor')


def main(argv):
  """Builds, trains, and evaluates the model."""
  args = parser.parse_args(argv[1:])

  (train_x, train_y), (test_x, test_y) = automobile_data.load_data()

  train_y /= args.price_norm_factor
  test_y /= args.price_norm_factor

  # Provide the training input dataset.
  train_input_fn = automobile_data.make_dataset(
      args.batch_size, train_x, train_y, True, 1000)

  # Provide the validation input dataset.
  test_input_fn = automobile_data.make_dataset(
      args.batch_size, test_x, test_y)

  # The following code demonstrates two of the ways that `feature_columns` can
  # be used to build a model with categorical inputs.

  # The first way assigns a unique weight to each category. To do this, you
  # must specify the category's vocabulary (values outside this specification
  # will receive a weight of zero).
  # Alternatively, you can define the vocabulary in a file (by calling
  # `categorical_column_with_vocabulary_file`) or as a range of positive
  # integers (by calling `categorical_column_with_identity`).
  body_style_vocab = ["hardtop", "wagon", "sedan", "hatchback", "convertible"]
  body_style_column = tf.feature_column.categorical_column_with_vocabulary_list(
      key="body-style", vocabulary_list=body_style_vocab)

  # The second way, appropriate for an unspecified vocabulary, is to create a
  # hashed column. It will create a fixed length list of weights, and
  # automatically assign each input category to a weight. Due to the
  # pseudo-randomness of the process, some weights may be shared between
  # categories, while others will remain unused.
  make_column = tf.feature_column.categorical_column_with_hash_bucket(
      key="make", hash_bucket_size=50)

  feature_columns = [
      # This model uses the same two numeric features as `linear_regressor.py`.
      tf.feature_column.numeric_column(key="curb-weight"),
      tf.feature_column.numeric_column(key="highway-mpg"),
      # This model adds two categorical columns that will adjust the price
      # based on "make" and "body-style".
      body_style_column,
      make_column,
  ]

  # Build the Estimator.
  model = tf.estimator.LinearRegressor(feature_columns=feature_columns)

  # Train the model.
  # By default, the Estimators log output every 100 steps.
  model.train(input_fn=train_input_fn, steps=args.train_steps)

  # Evaluate how the model performs on data it has not yet seen.
  eval_result = model.evaluate(input_fn=test_input_fn)

  # The evaluation returns a Python dictionary. The "average_loss" key holds
  # the Mean Squared Error (MSE).
  average_loss = eval_result["average_loss"]

  # Convert MSE to Root Mean Square Error (RMSE).
  print("\n" + 80 * "*")
  print("\nRMS error for the test set: ${:.0f}"
        .format(args.price_norm_factor * average_loss**0.5))

  print()


if __name__ == "__main__":
  # The Estimator periodically generates "INFO" logs; make these logs visible.
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main=main)
samples/cookbook/regression/regression_test.py
deleted 100644 → 0
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple smoke test that runs these examples for 1 training iteraton."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pandas as pd
import tensorflow as tf
from six.moves import StringIO

import automobile_data
import dnn_regression
import linear_regression
import linear_regression_categorical
import custom_regression

# pylint: disable=line-too-long
FOUR_LINES = "\n".join([
    "1,?,alfa-romero,gas,std,two,hatchback,rwd,front,94.50,171.20,65.50,52.40,2823,ohcv,six,152,mpfi,2.68,3.47,9.00,154,5000,19,26,16500",
    "2,164,audi,gas,std,four,sedan,fwd,front,99.80,176.60,66.20,54.30,2337,ohc,four,109,mpfi,3.19,3.40,10.00,102,5500,24,30,13950",
    "2,164,audi,gas,std,four,sedan,4wd,front,99.40,176.60,66.40,54.30,2824,ohc,five,136,mpfi,3.19,3.40,8.00,115,5500,18,22,17450",
    "2,?,audi,gas,std,two,sedan,fwd,front,99.80,177.30,66.30,53.10,2507,ohc,five,136,mpfi,3.19,3.40,8.50,110,5500,19,25,15250",])
# pylint: enable=line-too-long

mock = tf.test.mock


def four_lines_dataframe():
  text = StringIO(FOUR_LINES)
  return pd.read_csv(text, names=automobile_data.COLUMN_TYPES.keys(),
                     dtype=automobile_data.COLUMN_TYPES, na_values="?")


def four_lines_dataset(*args, **kwargs):
  del args, kwargs
  return tf.data.Dataset.from_tensor_slices(FOUR_LINES.split("\n"))


class RegressionTest(tf.test.TestCase):
  """Test the regression examples in this directory."""

  @mock.patch.dict(automobile_data.__dict__,
                   {"raw_dataframe": four_lines_dataframe})
  def test_linear_regression(self):
    linear_regression.main([None, "--train_steps=1"])

  @mock.patch.dict(automobile_data.__dict__,
                   {"raw_dataframe": four_lines_dataframe})
  def test_linear_regression_categorical(self):
    linear_regression_categorical.main([None, "--train_steps=1"])

  @mock.patch.dict(automobile_data.__dict__,
                   {"raw_dataframe": four_lines_dataframe})
  def test_dnn_regression(self):
    dnn_regression.main([None, "--train_steps=1"])

  @mock.patch.dict(automobile_data.__dict__,
                   {"raw_dataframe": four_lines_dataframe})
  def test_custom_regression(self):
    custom_regression.main([None, "--train_steps=1"])


if __name__ == "__main__":
  tf.test.main()
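The smoke tests above swap raw_dataframe for a four-row stub via mock.patch.dict on the module's __dict__, so every lookup of automobile_data.raw_dataframe resolves to the fake for the duration of each test. A self-contained sketch of the same technique (standard-library unittest.mock; the stand-in module is illustrative):

import types
import unittest
from unittest import mock

# A stand-in module, so the sketch runs without this directory's files.
automobile_data = types.ModuleType("automobile_data")
automobile_data.raw_dataframe = lambda: "real frame"


def fake_dataframe():
  return "tiny in-memory frame"


class PatchDictExample(unittest.TestCase):

  @mock.patch.dict(automobile_data.__dict__,
                   {"raw_dataframe": fake_dataframe})
  def test_uses_stub(self):
    # The module-level name is rebound for the duration of the test and
    # restored automatically afterwards.
    self.assertEqual(automobile_data.raw_dataframe(), "tiny in-memory frame")


if __name__ == "__main__":
  unittest.main()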