ModelZoo / ResNet50_tensorflow

Commit 1f4747a4, authored Apr 02, 2018 by pkulzc

    Merge remote-tracking branch 'upstream/master'

Parents: d2d01f4f, a7aa25d3

Changes: 116 files in total. This page shows 20 of them, with 432 additions and 6 deletions (+432 -6).
Files on this page:

official/utils/logging/logger_test.py (+80 -2)
official/utils/testing/integration.py (+10 -3)
official/utils/testing/pylint.rcfile (+1 -1)
official/utils/testing/reference_data.py (+334 -0)
official/utils/testing/reference_data/reference_data_test/dense/expected_graph (+0 -0)
official/utils/testing/reference_data/reference_data_test/dense/model.ckpt.data-00000-of-00001 (+0 -0)
official/utils/testing/reference_data/reference_data_test/dense/model.ckpt.index (+0 -0)
official/utils/testing/reference_data/reference_data_test/dense/results.json (+1 -0)
official/utils/testing/reference_data/reference_data_test/dense/tf_version.json (+1 -0)
official/utils/testing/reference_data/reference_data_test/uniform_random/expected_graph (+0 -0)
official/utils/testing/reference_data/reference_data_test/uniform_random/model.ckpt.data-00000-of-00001 (+1 -0)
official/utils/testing/reference_data/reference_data_test/uniform_random/model.ckpt.index (+0 -0)
official/utils/testing/reference_data/reference_data_test/uniform_random/results.json (+1 -0)
official/utils/testing/reference_data/reference_data_test/uniform_random/tf_version.json (+1 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/expected_graph (+0 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/model.ckpt.data-00000-of-00001 (+0 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/model.ckpt.index (+0 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/results.json (+1 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/tf_version.json (+1 -0)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-2_width-8_channels-4/expected_graph (+0 -0)
official/utils/logging/logger_test.py

@@ -22,6 +22,7 @@ from __future__ import print_function
 import json
 import os
 import tempfile
+import unittest

 import tensorflow as tf  # pylint: disable=g-bad-import-order

@@ -30,9 +31,19 @@ from official.utils.logging import logger
 class BenchmarkLoggerTest(tf.test.TestCase):

+  def setUp(self):
+    super(BenchmarkLoggerTest, self).setUp()
+    # Avoid pulling extra env vars from the test environment, which can affect
+    # results; e.g. Kokoro sets a TF_PKG env var, which affects
+    # test_collect_tensorflow_environment_variables().
+    self.original_environ = dict(os.environ)
+    os.environ.clear()
+
   def tearDown(self):
     super(BenchmarkLoggerTest, self).tearDown()
     tf.gfile.DeleteRecursively(self.get_temp_dir())
+    os.environ.clear()
+    os.environ.update(self.original_environ)

   def test_create_logging_dir(self):
     non_exist_temp_dir = os.path.join(self.get_temp_dir(), "unknown_dir")

@@ -54,7 +65,7 @@ class BenchmarkLoggerTest(tf.test.TestCase):
       self.assertEqual(metric["value"], 0.999)
       self.assertEqual(metric["unit"], None)
       self.assertEqual(metric["global_step"], 1e4)
-      self.assertEqual(metric["extras"], {"name": "value"})
+      self.assertEqual(metric["extras"], [{"name": "name", "value": "value"}])

   def test_log_multiple_metrics(self):
     log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())

@@ -70,13 +81,14 @@ class BenchmarkLoggerTest(tf.test.TestCase):
       self.assertEqual(accuracy["value"], 0.999)
       self.assertEqual(accuracy["unit"], None)
       self.assertEqual(accuracy["global_step"], 1e4)
-      self.assertEqual(accuracy["extras"], {"name": "value"})
+      self.assertEqual(accuracy["extras"], [{"name": "name", "value": "value"}])

       loss = json.loads(f.readline())
       self.assertEqual(loss["name"], "loss")
       self.assertEqual(loss["value"], 0.02)
       self.assertEqual(loss["unit"], None)
       self.assertEqual(loss["global_step"], 1e4)
       self.assertEqual(loss["extras"], [])

   def test_log_non_number_value(self):
     log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())

@@ -87,5 +99,71 @@ class BenchmarkLoggerTest(tf.test.TestCase):
     metric_log = os.path.join(log_dir, "metric.log")
     self.assertFalse(tf.gfile.Exists(metric_log))

+  def test_log_evaluation_result(self):
+    eval_result = {"loss": 0.46237424,
+                   "global_step": 207082,
+                   "accuracy": 0.9285}
+    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
+    log = logger.BenchmarkLogger(log_dir)
+    log.log_estimator_evaluation_result(eval_result)
+
+    metric_log = os.path.join(log_dir, "metric.log")
+    self.assertTrue(tf.gfile.Exists(metric_log))
+    with tf.gfile.GFile(metric_log) as f:
+      accuracy = json.loads(f.readline())
+      self.assertEqual(accuracy["name"], "accuracy")
+      self.assertEqual(accuracy["value"], 0.9285)
+      self.assertEqual(accuracy["unit"], None)
+      self.assertEqual(accuracy["global_step"], 207082)
+
+      loss = json.loads(f.readline())
+      self.assertEqual(loss["name"], "loss")
+      self.assertEqual(loss["value"], 0.46237424)
+      self.assertEqual(loss["unit"], None)
+      self.assertEqual(loss["global_step"], 207082)
+
+  def test_log_evaluation_result_with_invalid_type(self):
+    eval_result = "{'loss': 0.46237424, 'global_step': 207082}"
+    log_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
+    log = logger.BenchmarkLogger(log_dir)
+    log.log_estimator_evaluation_result(eval_result)
+
+    metric_log = os.path.join(log_dir, "metric.log")
+    self.assertFalse(tf.gfile.Exists(metric_log))
+
+  def test_collect_tensorflow_info(self):
+    run_info = {}
+    logger._collect_tensorflow_info(run_info)
+    self.assertNotEqual(run_info["tensorflow_version"], {})
+    self.assertEqual(run_info["tensorflow_version"]["version"], tf.VERSION)
+    self.assertEqual(run_info["tensorflow_version"]["git_hash"],
+                     tf.GIT_VERSION)
+
+  def test_collect_tensorflow_environment_variables(self):
+    os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "1"
+    os.environ["TF_OTHER"] = "2"
+    os.environ["OTHER"] = "3"
+
+    run_info = {}
+    logger._collect_tensorflow_environment_variables(run_info)
+    self.assertIsNotNone(run_info["tensorflow_environment_variables"])
+    expected_tf_envs = [
+        {"name": "TF_ENABLE_WINOGRAD_NONFUSED", "value": "1"},
+        {"name": "TF_OTHER", "value": "2"},
+    ]
+    self.assertEqual(run_info["tensorflow_environment_variables"],
+                     expected_tf_envs)
+
+  @unittest.skipUnless(tf.test.is_built_with_cuda(), "requires GPU")
+  def test_collect_gpu_info(self):
+    run_info = {"machine_config": {}}
+    logger._collect_gpu_info(run_info)
+    self.assertNotEqual(run_info["machine_config"]["gpu_info"], {})
+
+  def test_collect_memory_info(self):
+    run_info = {"machine_config": {}}
+    logger._collect_memory_info(run_info)
+    self.assertIsNotNone(run_info["machine_config"]["memory_total"])
+    self.assertIsNotNone(run_info["machine_config"]["memory_available"])
+

 if __name__ == "__main__":
   tf.test.main()
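The tests above treat metric.log as a JSON-lines file: one JSON object per metric, read back with f.readline(). A minimal sketch of a record in that format, using only field names that appear in the assertions (the writer itself, official/utils/logging/logger.py, is not part of this page):

import json

# One record per line, matching what the tests read back with f.readline().
record = {"name": "accuracy", "value": 0.9285, "unit": None,
          "global_step": 207082, "extras": []}
with open("metric.log", "a") as f:
  f.write(json.dumps(record) + "\n")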
official/utils/testing/integration.py

@@ -26,7 +26,7 @@ import sys
 import tempfile


-def run_synthetic(main, tmp_root, extra_flags=None):
+def run_synthetic(main, tmp_root, extra_flags=None, synth=True, max_train=1):
   """Performs a minimal run of a model.

   This function is intended to test for syntax errors throughout a model. A

@@ -37,6 +37,8 @@ def run_synthetic(main, tmp_root, extra_flags=None):
     function is "<MODULE>.main(argv)".
     tmp_root: Root path for the temp directory created by the test class.
     extra_flags: Additional flags passed by the caller of this function.
+    synth: Use synthetic data.
+    max_train: Maximum number of allowed training steps.
   """
   extra_flags = [] if extra_flags is None else extra_flags

@@ -44,8 +46,13 @@ def run_synthetic(main, tmp_root, extra_flags=None):
   model_dir = tempfile.mkdtemp(dir=tmp_root)
-  args = [sys.argv[0], "--model_dir", model_dir, "--train_epochs", "1",
-          "--epochs_between_evals", "1", "--use_synthetic_data",
-          "--max_train_steps", "1"] + extra_flags
+  args = [sys.argv[0], "--model_dir", model_dir, "--train_epochs", "1",
+          "--epochs_between_evals", "1"] + extra_flags
+
+  if synth:
+    args.append("--use_synthetic_data")
+
+  if max_train is not None:
+    args.extend(["--max_train_steps", str(max_train)])

   try:
     main(args)
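With the new synth and max_train arguments, a caller can opt out of synthetic data or raise the step cap without rebuilding the flag list by hand. A hypothetical caller, where my_model stands in for a real model module exposing the expected main(argv) entry point:

import tensorflow as tf

from official.utils.testing import integration
import my_model  # placeholder for an actual model module


class MyModelTest(tf.test.TestCase):

  def test_one_step_on_synthetic_data(self):
    integration.run_synthetic(
        main=my_model.main, tmp_root=self.get_temp_dir(),
        extra_flags=["--batch_size", "8"],
        synth=True,    # appends --use_synthetic_data
        max_train=1)   # appends --max_train_steps 1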
official/utils/testing/pylint.rcfile

@@ -61,7 +61,7 @@ variable-rgx=^[a-z][a-z0-9_]*$
 # (useful for modules/projects where namespaces are manipulated during runtime
 # and thus existing member attributes cannot be deduced by static analysis. It
 # supports qualified module names, as well as Unix pattern matching.
-ignored-modules=official, official.*, tensorflow, tensorflow.*, LazyLoader
+ignored-modules=official, official.*, tensorflow, tensorflow.*, LazyLoader, google, google.cloud.*

 [CLASSES]
official/utils/testing/reference_data.py (new file, 0 → 100644)

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow testing subclass to automate numerical testing.

Reference tests determine when behavior deviates from some "gold standard,"
and are useful for determining when layer definitions have changed without
performing full regression testing, which is generally prohibitive. This class
handles the symbolic graph comparison as well as loading weights to avoid
relying on random number generation, which can change.

The tests performed by this class are:

1) Compare a generated graph against a reference graph. Differences are not
   necessarily fatal.
2) Attempt to load known weights for the graph. If this step succeeds but
   changes are present in the graph, a warning is issued but does not raise
   an exception.
3) Perform a calculation and compare the result to a reference value.

This class also provides a method to generate reference data.

Note:
  The test class is responsible for fixing the random seed during graph
  definition. A convenience method name_to_seed() is provided to make this
  process easier.

The test class should also define a .regenerate() class method which (usually)
just calls the op definition function with test=False for all relevant tests.

A concise example of this class in action is provided in:
  official/utils/testing/reference_data_test.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import hashlib
import json
import os
import shutil
import sys

import numpy as np
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow


class BaseTest(tf.test.TestCase):
  """TestCase subclass for performing reference data tests."""

  def regenerate(self):
    """Subclasses should override this function to generate a new reference."""
    raise NotImplementedError

  @property
  def test_name(self):
    """Subclass should define its own name."""
    raise NotImplementedError

  @property
  def data_root(self):
    """Use the subclass directory rather than the parent directory.

    Returns:
      The path prefix for reference data.
    """
    return os.path.join(os.path.split(os.path.abspath(__file__))[0],
                        "reference_data", self.test_name)

  ckpt_prefix = "model.ckpt"

  @staticmethod
  def name_to_seed(name):
    """Convert a string into a 32 bit integer.

    This function allows test cases to easily generate random fixed seeds by
    hashing the name of the test. The hash string is in hex rather than base 10
    which is why there is a 16 in the int call, and the modulo projects the
    seed from a 128 bit int to 32 bits for readability.

    Args:
      name: A string containing the name of a test.

    Returns:
      A pseudo-random 32 bit integer derived from name.
    """
    seed = hashlib.md5(name.encode("utf-8")).hexdigest()
    return int(seed, 16) % (2 ** 32 - 1)

  @staticmethod
  def common_tensor_properties(input_array):
    """Convenience function for matrix testing.

    In tests we wish to determine whether a result has changed. However,
    storing an entire n-dimensional array is impractical. A better approach is
    to calculate several values from that array and test that those derived
    values are unchanged. The properties themselves are arbitrary and should
    be chosen to be good proxies for a full equality test.

    Args:
      input_array: A numpy array from which key values are extracted.

    Returns:
      A list of values derived from the input_array for equality tests.
    """
    output = list(input_array.shape)
    flat_array = input_array.flatten()
    output.extend([float(i) for i in
                   [flat_array[0], flat_array[-1], np.sum(flat_array)]])
    return output

  def default_correctness_function(self, *args):
    """Returns a vector with the concatenation of common properties.

    This function simply calls common_tensor_properties() for every element.
    It is useful as it allows one to easily construct tests of layers without
    having to worry about the details of result checking.

    Args:
      *args: A list of numpy arrays corresponding to tensors which have been
        evaluated.

    Returns:
      A list of values containing properties for every element in args.
    """
    output = []
    for arg in args:
      output.extend(self.common_tensor_properties(arg))
    return output

  def _construct_and_save_reference_files(
      self, name, graph, ops_to_eval, correctness_function):
    """Save reference data files.

    Constructs a serialized graph_def, layer weights, and computation results.
    It then saves them to files which are read at test time.

    Args:
      name: String defining the run. This will be used to define folder names
        and will be used for random seed construction.
      graph: The graph in which the test is conducted.
      ops_to_eval: Ops which the user wishes to be evaluated under a controlled
        session.
      correctness_function: This function accepts the evaluated results of
        ops_to_eval, and returns a list of values. This list must be JSON
        serializable; in particular it is up to the user to convert numpy
        dtypes into builtin dtypes.
    """
    data_dir = os.path.join(self.data_root, name)

    # Make sure there is a clean space for results.
    if os.path.exists(data_dir):
      shutil.rmtree(data_dir)
    os.makedirs(data_dir)

    # Serialize graph for comparison.
    graph_bytes = graph.as_graph_def().SerializeToString()
    expected_file = os.path.join(data_dir, "expected_graph")
    with open(expected_file, "wb") as f:
      f.write(graph_bytes)

    with graph.as_default():
      init = tf.global_variables_initializer()
      saver = tf.train.Saver()

    with self.test_session(graph=graph) as sess:
      sess.run(init)
      saver.save(sess=sess, save_path=os.path.join(data_dir, self.ckpt_prefix))

      # These files are not needed for this test.
      os.remove(os.path.join(data_dir, "checkpoint"))
      os.remove(os.path.join(data_dir, self.ckpt_prefix + ".meta"))

      # ops are evaluated even if there is no correctness function to ensure
      # that they can be evaluated.
      eval_results = [op.eval() for op in ops_to_eval]

      if correctness_function is not None:
        results = correctness_function(*eval_results)
        with open(os.path.join(data_dir, "results.json"), "wt") as f:
          json.dump(results, f)

      with open(os.path.join(data_dir, "tf_version.json"), "wt") as f:
        json.dump([tf.VERSION, tf.GIT_VERSION], f)

  def _evaluate_test_case(self, name, graph, ops_to_eval,
                          correctness_function):
    """Determine if a graph agrees with the reference data.

    Args:
      name: String defining the run. This will be used to define folder names
        and will be used for random seed construction.
      graph: The graph in which the test is conducted.
      ops_to_eval: Ops which the user wishes to be evaluated under a controlled
        session.
      correctness_function: This function accepts the evaluated results of
        ops_to_eval, and returns a list of values. This list must be JSON
        serializable; in particular it is up to the user to convert numpy
        dtypes into builtin dtypes.
    """
    data_dir = os.path.join(self.data_root, name)

    # Serialize graph for comparison.
    graph_bytes = graph.as_graph_def().SerializeToString()
    expected_file = os.path.join(data_dir, "expected_graph")
    with open(expected_file, "rb") as f:
      expected_graph_bytes = f.read()
      # The serialization is non-deterministic byte-for-byte. Instead there is
      # a utility which evaluates the semantics of the two graphs to test for
      # equality. This has the added benefit of providing some information on
      # what changed.
      #   Note: The summary only shows the first difference detected. It is
      #         not an exhaustive summary of differences.
      differences = pywrap_tensorflow.EqualGraphDefWrapper(
          graph_bytes, expected_graph_bytes).decode("utf-8")

    with graph.as_default():
      init = tf.global_variables_initializer()
      saver = tf.train.Saver()

    with open(os.path.join(data_dir, "tf_version.json"), "rt") as f:
      tf_version_reference, tf_git_version_reference = json.load(f)  # pylint: disable=unpacking-non-sequence

    tf_version_comparison = ""
    if tf.GIT_VERSION != tf_git_version_reference:
      tf_version_comparison = (
          "Test was built using:     {} (git = {})\n"
          "Local TensorFlow version: {} (git = {})"
          .format(tf_version_reference, tf_git_version_reference,
                  tf.VERSION, tf.GIT_VERSION))

    with self.test_session(graph=graph) as sess:
      sess.run(init)
      try:
        saver.restore(sess=sess,
                      save_path=os.path.join(data_dir, self.ckpt_prefix))
        if differences:
          tf.logging.warn(
              "The provided graph is different than expected:\n{}\n"
              "However the weights were still able to be loaded.\n{}"
              .format(differences, tf_version_comparison))
      except:  # pylint: disable=bare-except
        raise self.failureException(
            "Weight load failed. Graph comparison:\n{}{}"
            .format(differences, tf_version_comparison))

      eval_results = [op.eval() for op in ops_to_eval]
      if correctness_function is not None:
        results = correctness_function(*eval_results)
        with open(os.path.join(data_dir, "results.json"), "rt") as f:
          expected_results = json.load(f)
        self.assertAllClose(results, expected_results)

  def _save_or_test_ops(self, name, graph, ops_to_eval=None, test=True,
                        correctness_function=None):
    """Utility function to automate repeated work of graph checking and saving.

    The philosophy of this function is that the user need only define ops on
    a graph and specify which results should be validated. The actual work of
    managing snapshots and calculating results should be automated away.

    Args:
      name: String defining the run. This will be used to define folder names
        and will be used for random seed construction.
      graph: The graph in which the test is conducted.
      ops_to_eval: Ops which the user wishes to be evaluated under a controlled
        session.
      test: Boolean. If True this function will test graph correctness, load
        weights, and compute numerical values. If False the necessary test
        data will be generated and saved.
      correctness_function: This function accepts the evaluated results of
        ops_to_eval, and returns a list of values. This list must be JSON
        serializable; in particular it is up to the user to convert numpy
        dtypes into builtin dtypes.
    """
    ops_to_eval = ops_to_eval or []

    if test:
      try:
        self._evaluate_test_case(
            name=name, graph=graph, ops_to_eval=ops_to_eval,
            correctness_function=correctness_function)
      except:
        tf.logging.error("Failed unittest {}".format(name))
        raise
    else:
      self._construct_and_save_reference_files(
          name=name, graph=graph, ops_to_eval=ops_to_eval,
          correctness_function=correctness_function)


class ReferenceDataActionParser(argparse.ArgumentParser):
  """Minimal arg parser so that test regeneration can be called from the CLI."""

  def __init__(self):
    super(ReferenceDataActionParser, self).__init__()
    self.add_argument(
        "--regenerate", "-regen", action="store_true",
        help="Enable this flag to regenerate test data. If not set, unit "
             "tests will be run.")


def main(argv, test_class):
  """Simple switch function to allow test regeneration from the CLI."""
  flags = ReferenceDataActionParser().parse_args(argv[1:])
  if flags.regenerate:
    if sys.version_info[0] == 2:
      raise NameError("\nPython2 unittest does not support being run as a "
                      "standalone class.\nAs a result tests must be "
                      "regenerated using Python3.\n"
                      "Tests can be run under 2 or 3.")
    test_class().regenerate()
  else:
    tf.test.main()
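The module docstring points to official/utils/testing/reference_data_test.py for a concise example, which is not included on this page. Here is a minimal sketch of what such a subclass could look like; the names DenseReferenceTest and _dense_test are illustrative and not taken from this commit:

import sys

import tensorflow as tf

from official.utils.testing import reference_data


class DenseReferenceTest(reference_data.BaseTest):
  """Illustrative subclass; the real example lives in reference_data_test.py."""

  @property
  def test_name(self):
    return "reference_data_test"

  def _dense_test(self, test=True):
    name = "dense"
    graph = tf.Graph()
    with graph.as_default():
      # Fix the graph-level seed from the run name so that regenerated
      # reference data is reproducible.
      tf.set_random_seed(self.name_to_seed(name))
      x = tf.random_uniform((1, 1))
      y = tf.layers.dense(x, units=1)
    self._save_or_test_ops(
        name=name, graph=graph, ops_to_eval=[y], test=test,
        correctness_function=self.default_correctness_function)

  def test_dense(self):
    self._dense_test(test=True)

  def regenerate(self):
    self._dense_test(test=False)


if __name__ == "__main__":
  reference_data.main(argv=sys.argv, test_class=DenseReferenceTest)

Run with --regenerate (Python 3 only, per the guard in main()) to rebuild expected_graph, model.ckpt.*, results.json, and tf_version.json under reference_data/<test_name>/<name>/; run without flags to compare against the stored reference, which is what the data files below contain.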
official/utils/testing/reference_data/reference_data_test/dense/expected_graph (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/reference_data_test/dense/model.ckpt.data-00000-of-00001 (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/reference_data_test/dense/model.ckpt.index (new file, 0 → 100644; binary, file added)
official/utils/testing/reference_data/reference_data_test/dense/results.json (new file, 0 → 100644):

[1, 1, 0.4701630473136902, 0.4701630473136902, 0.4701630473136902]
(no newline at end of file)
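These five values line up with common_tensor_properties() from reference_data.py above. A quick check of that reading, assuming (it is not stated on this page) that the dense test evaluates a single 1x1 tensor: the shape comes first, then the first element, last element, and sum of the flattened array, which all coincide for a one-element array.

import numpy as np

arr = np.array([[0.4701630473136902]])
flat = arr.flatten()
props = list(arr.shape) + [float(flat[0]), float(flat[-1]), float(np.sum(arr))]
print(props)
# [1, 1, 0.4701630473136902, 0.4701630473136902, 0.4701630473136902]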
official/utils/testing/reference_data/reference_data_test/dense/tf_version.json (new file, 0 → 100644):

["1.8.0-dev20180325", "v1.7.0-rc1-750-g6c1737e6c8"]
(no newline at end of file)

official/utils/testing/reference_data/reference_data_test/uniform_random/expected_graph (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/reference_data_test/uniform_random/model.ckpt.data-00000-of-00001 (new file, 0 → 100644; one line of binary checkpoint data, not printable)

official/utils/testing/reference_data/reference_data_test/uniform_random/model.ckpt.index (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/reference_data_test/uniform_random/results.json (new file, 0 → 100644):

[0.9872556924819946]
(no newline at end of file)

official/utils/testing/reference_data/reference_data_test/uniform_random/tf_version.json (new file, 0 → 100644):

["1.8.0-dev20180325", "v1.7.0-rc1-750-g6c1737e6c8"]
(no newline at end of file)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/expected_graph (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/model.ckpt.data-00000-of-00001 (new file, 0 → 100644; binary, file added)

official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/model.ckpt.index (new file, 0 → 100644; binary, file added)
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/results.json (new file, 0 → 100644):

[32, 8, 8, 4, 0.08920872211456299, 0.8918969631195068, 4064.7060546875,
 32, 4, 4, 8, 0.0, 0.8524793982505798, 2294.368896484375]
(no newline at end of file)
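Read through common_tensor_properties(), these fourteen values are consistent with two evaluated tensors: a block input of shape [32, 8, 8, 4] followed by its first element, last element, and sum, then a projection output of shape [32, 4, 4, 8] with the same three statistics. This is an inference from the test utility above, not something stated on this page.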
official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-1_width-8_channels-4/tf_version.json (new file, 0 → 100644):

["1.8.0-dev20180325", "v1.7.0-rc1-750-g6c1737e6c8"]
(no newline at end of file)

official/utils/testing/reference_data/resnet/batch-size-32_bottleneck_projection_version-2_width-8_channels-4/expected_graph (new file, 0 → 100644; binary, file added)