ModelZoo / ResNet50_tensorflow · Commits

Commit 47a59023 (unverified)
Authored Jun 22, 2019 by Toby Boyd, committed by GitHub on Jun 22, 2019
Parent: 513fdbb2

    Fix unit tests failures. (#7086)
Changes: 4 changed files with 62 additions and 47 deletions (+62, -47)
  official/mnist/mnist_eager_test.py                   +6   -1
  official/resnet/ctl/ctl_imagenet_test.py             +12  -12
  official/transformer/v2/transformer_main_test.py     +38  -26
  official/utils/testing/scripts/presubmit.sh          +6   -8
official/mnist/mnist_eager_test.py (view file @ 47a59023)

@@ -22,6 +22,7 @@ import tensorflow.contrib.eager as tfe  # pylint: disable=g-bad-import-order
 from official.mnist import mnist
 from official.mnist import mnist_eager
+from official.utils.misc import keras_utils


 def device():
@@ -62,6 +63,11 @@ def evaluate(defun=False):
 class MNISTTest(tf.test.TestCase):
   """Run tests for MNIST eager loop."""

+  def setUp(self):
+    if not keras_utils.is_v2_0():
+      tf.compat.v1.enable_v2_behavior()
+    super(MNISTTest, self).setUp()
+
   def test_train(self):
     train(defun=False)
@@ -76,5 +82,4 @@ class MNISTTest(tf.test.TestCase):


 if __name__ == "__main__":
-  tfe.enable_eager_execution()
   tf.test.main()
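The gist of this change is to stop relying on the removed module-level tfe.enable_eager_execution() call and instead switch on TF 2.x behavior per test, guarded by keras_utils.is_v2_0(). A minimal standalone sketch of that pattern, assuming the tensorflow/models repo is on PYTHONPATH so official.utils.misc.keras_utils is importable (the test class name below is illustrative, not part of the commit):

    import tensorflow as tf
    from official.utils.misc import keras_utils  # models-repo helper used in the diff


    class ExampleEagerTest(tf.test.TestCase):  # illustrative class name

      def setUp(self):
        # Enable TF 2.x semantics (eager execution by default) only when the
        # installed TensorFlow still defaults to 1.x behavior; this replaces
        # the old module-level tfe.enable_eager_execution() call.
        if not keras_utils.is_v2_0():
          tf.compat.v1.enable_v2_behavior()
        super(ExampleEagerTest, self).setUp()

      def test_eager_is_on(self):
        self.assertTrue(tf.executing_eagerly())


    if __name__ == "__main__":
      tf.test.main()

Doing this in setUp() rather than at import time keeps the test importable under both TF 1.x and TF 2.x builds.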
official/resnet/ctl/ctl_imagenet_test.py (view file @ 47a59023)

@@ -24,8 +24,8 @@ import tensorflow as tf
 from official.resnet import imagenet_main
 from official.resnet.ctl import ctl_imagenet_main
 from official.resnet.ctl import ctl_common
+from official.utils.misc import keras_utils
 from official.utils.testing import integration
-from official.resnet.keras import keras_common
 # pylint: disable=ungrouped-imports
 from tensorflow.python.eager import context
 from tensorflow.python.platform import googletest
@@ -54,6 +54,8 @@ class CtlImagenetTest(googletest.TestCase):
   def setUp(self):
     super(CtlImagenetTest, self).setUp()
+    if not keras_utils.is_v2_0():
+      tf.compat.v1.enable_v2_behavior()
     imagenet_main.NUM_IMAGES['validation'] = 4

   def tearDown(self):
@@ -64,9 +66,9 @@ class CtlImagenetTest(googletest.TestCase):
     """Test Keras model with 1 GPU, no distribution strategy."""
     extra_flags = [
-        "-distribution_strategy", "off",
-        "-model_dir", "ctl_imagenet_no_dist_strat",
-        "-data_format", "channels_last",
+        '-distribution_strategy', 'off',
+        '-model_dir', 'ctl_imagenet_no_dist_strat',
+        '-data_format', 'channels_last',
     ]
     extra_flags = extra_flags + self._extra_flags
@@ -78,15 +80,15 @@ class CtlImagenetTest(googletest.TestCase):
   def test_end_to_end_2_gpu(self):
     """Test Keras model with 2 GPUs."""
-    num_gpus = "2"
+    num_gpus = '2'
     if context.num_gpus() < 2:
-      num_gpus = "0"
+      num_gpus = '0'
     extra_flags = [
-        "-num_gpus", num_gpus,
-        "-distribution_strategy", "default",
-        "-model_dir", "ctl_imagenet_2_gpu",
-        "-data_format", "channels_last",
+        '-num_gpus', num_gpus,
+        '-distribution_strategy', 'default',
+        '-model_dir', 'ctl_imagenet_2_gpu',
+        '-data_format', 'channels_last',
     ]
     extra_flags = extra_flags + self._extra_flags
@@ -97,6 +99,4 @@ class CtlImagenetTest(googletest.TestCase):
     )

 if __name__ == '__main__':
-  if not keras_common.is_v2_0():
-    tf.enable_v2_behavior()
   googletest.main()
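One small behavior worth noting in test_end_to_end_2_gpu is the GPU-count fallback: the test asks for 2 GPUs but drops to '0' (CPU only) when fewer are visible. A hedged sketch of that check, using the same internal tensorflow.python.eager.context helper imported in the diff above (pick_num_gpus is an illustrative name, not part of the change, and current TF releases may no longer expose this internal module):

    from tensorflow.python.eager import context  # internal TF helper, as in the diff


    def pick_num_gpus(requested=2):
      # context.num_gpus() reports how many GPUs TensorFlow can see; fall back
      # to '0' (run on CPU) when fewer than the requested count are available.
      return str(requested) if context.num_gpus() >= requested else '0'


    extra_flags = [
        '-num_gpus', pick_num_gpus(),
        '-distribution_strategy', 'default',
    ]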
official/transformer/v2/transformer_main_test.py (view file @ 47a59023)

@@ -20,44 +20,54 @@ from __future__ import print_function

 import os
 import re
+import unittest

 from absl import flags
+from absl.testing import flagsaver
 import tensorflow as tf

 from official.transformer.v2 import misc
 from official.transformer.v2 import transformer_main as tm

 FLAGS = flags.FLAGS
-FIXED_TIMESTAMP = "my_time_stamp"
-WEIGHT_PATTERN = re.compile(r"weights-epoch-.+\.hdf5")
+FIXED_TIMESTAMP = 'my_time_stamp'
+WEIGHT_PATTERN = re.compile(r'weights-epoch-.+\.hdf5')


 def _generate_file(filepath, lines):
-  with open(filepath, "w") as f:
+  with open(filepath, 'w') as f:
     for l in lines:
-      f.write("{}\n".format(l))
+      f.write('{}\n'.format(l))


 class TransformerTaskTest(tf.test.TestCase):
+  local_flags = None

   def setUp(self):
     temp_dir = self.get_temp_dir()
+    if TransformerTaskTest.local_flags is None:
+      misc.define_transformer_flags()
+      # Loads flags, array cannot be blank.
+      flags.FLAGS(['foo'])
+      TransformerTaskTest.local_flags = flagsaver.save_flag_values()
+    else:
+      flagsaver.restore_flag_values(TransformerTaskTest.local_flags)
     FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP)
-    FLAGS.param_set = "tiny"
+    FLAGS.param_set = 'tiny'
     FLAGS.use_synthetic_data = True
     FLAGS.steps_between_evals = 1
     FLAGS.train_steps = 2
     FLAGS.validation_steps = 1
     FLAGS.batch_size = 8
     FLAGS.num_gpus = 1
-    FLAGS.distribution_strategy = "off"
-    FLAGS.dtype = "fp32"
+    FLAGS.distribution_strategy = 'off'
+    FLAGS.dtype = 'fp32'
     self.model_dir = FLAGS.model_dir
     self.temp_dir = temp_dir
-    self.vocab_file = os.path.join(temp_dir, "vocab")
-    self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)["vocab_size"]
-    self.bleu_source = os.path.join(temp_dir, "bleu_source")
-    self.bleu_ref = os.path.join(temp_dir, "bleu_ref")
+    self.vocab_file = os.path.join(temp_dir, 'vocab')
+    self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size']
+    self.bleu_source = os.path.join(temp_dir, 'bleu_source')
+    self.bleu_ref = os.path.join(temp_dir, 'bleu_ref')
     self.orig_policy = tf.keras.mixed_precision.experimental.global_policy()

   def tearDown(self):
@@ -75,23 +85,26 @@ class TransformerTaskTest(tf.test.TestCase):
     t = tm.TransformerTask(FLAGS)
     t.train()

+  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_1_gpu_with_dist_strat(self):
-    FLAGS.distribution_strategy = "one_device"
+    FLAGS.distribution_strategy = 'one_device'
     t = tm.TransformerTask(FLAGS)
     t.train()

+  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_2_gpu(self):
-    FLAGS.distribution_strategy = "mirrored"
+    FLAGS.distribution_strategy = 'mirrored'
     FLAGS.num_gpus = 2
-    FLAGS.param_set = "base"
+    FLAGS.param_set = 'base'
     t = tm.TransformerTask(FLAGS)
     t.train()

+  @unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
   def test_train_2_gpu_fp16(self):
-    FLAGS.distribution_strategy = "mirrored"
+    FLAGS.distribution_strategy = 'mirrored'
     FLAGS.num_gpus = 2
-    FLAGS.param_set = "base"
+    FLAGS.param_set = 'base'
-    FLAGS.dtype = "fp16"
+    FLAGS.dtype = 'fp16'
     t = tm.TransformerTask(FLAGS)
     t.train()
@@ -107,15 +120,15 @@ class TransformerTaskTest(tf.test.TestCase):
     ]
     tokens += ["'{}'".format(i) for i in range(self.vocab_size - len(tokens))]

     _generate_file(self.vocab_file, tokens)
-    _generate_file(self.bleu_source, ["a b", "c d"])
-    _generate_file(self.bleu_ref, ["a b", "d c"])
+    _generate_file(self.bleu_source, ['a b', 'c d'])
+    _generate_file(self.bleu_ref, ['a b', 'd c'])

     # Update flags.
     update_flags = [
-        "ignored_program_name",
-        "--vocab_file={}".format(self.vocab_file),
-        "--bleu_source={}".format(self.bleu_source),
-        "--bleu_ref={}".format(self.bleu_ref),
+        'ignored_program_name',
+        '--vocab_file={}'.format(self.vocab_file),
+        '--bleu_source={}'.format(self.bleu_source),
+        '--bleu_ref={}'.format(self.bleu_ref),
     ]
     if extra_flags:
       update_flags.extend(extra_flags)
@@ -127,7 +140,7 @@ class TransformerTaskTest(tf.test.TestCase):
     t.predict()

   def test_predict_fp16(self):
-    self._prepare_files_and_flags("--dtype=fp16")
+    self._prepare_files_and_flags('--dtype=fp16')
     t = tm.TransformerTask(FLAGS)
     t.predict()
@@ -137,6 +150,5 @@ class TransformerTaskTest(tf.test.TestCase):
     t.eval()


-if __name__ == "__main__":
-  misc.define_transformer_flags()
+if __name__ == '__main__':
   tf.test.main()
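The larger structural change here is the flag handling in setUp(): the transformer flags are defined and parsed once per process, snapshotted with absl's flagsaver, and restored before every later test so flag mutations in one test cannot leak into the next. A minimal sketch of that caching pattern, assuming a made-up define_my_flags() in place of misc.define_transformer_flags() and an illustrative test class name:

    from absl import flags
    from absl.testing import flagsaver
    import tensorflow as tf


    def define_my_flags():
      # Stand-in for misc.define_transformer_flags() in the real test.
      flags.DEFINE_string('param_set', 'tiny', 'Illustrative flag.')


    class FlagCachingTest(tf.test.TestCase):  # illustrative class name
      local_flags = None

      def setUp(self):
        super(FlagCachingTest, self).setUp()
        if FlagCachingTest.local_flags is None:
          define_my_flags()
          # Mark the flags as parsed; the argv list passed here cannot be empty.
          flags.FLAGS(['ignored_program_name'])
          FlagCachingTest.local_flags = flagsaver.save_flag_values()
        else:
          # Reset every flag to the snapshot taken after the first parse.
          flagsaver.restore_flag_values(FlagCachingTest.local_flags)
        flags.FLAGS.param_set = 'tiny'  # per-test overrides go after the restore

The GPU training tests are additionally wrapped in unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU'), so they are skipped on CPU-only builds instead of failing.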
official/utils/testing/scripts/presubmit.sh (view file @ 47a59023)

@@ -14,7 +14,7 @@
 # limitations under the License.
 # ==============================================================================

-# Presubmit script that run tests and lint under local environment.
+# Presubmit script that runs tests and lint under local environment.
 # Make sure that tensorflow and pylint is installed.
 # usage: models >: ./official/utils/testing/scripts/presubmit.sh
 # usage: models >: ./official/utils/testing/scripts/presubmit.sh lint py2_test py3_test
@@ -26,16 +26,14 @@ MODEL_ROOT="$(pwd)"
 export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}"

-cd official
-
 lint() {
   local exit_code=0

-  RC_FILE="utils/testing/pylint.rcfile"
+  RC_FILE="official/utils/testing/pylint.rcfile"
   PROTO_SKIP="DO\sNOT\sEDIT!"

   echo "===========Running lint test============"
-  for file in `find . -name '*.py' ! -name '*test.py' -print`
+  for file in `find official/ -name '*.py' ! -name '*test.py' -print`
   do
     if grep ${PROTO_SKIP} ${file}; then
       echo "Linting ${file} (Skipped: Machine generated file)"
@@ -46,7 +44,7 @@ lint() {
   done

   # More lenient for test files.
-  for file in `find . -name '*test.py' -print`
+  for file in `find official/ -name '*test.py' -print`
   do
     echo "Linting ${file}"
     pylint --rcfile="${RC_FILE}" --disable=missing-docstring,protected-access "${file}" || exit_code=$?
@@ -61,9 +59,9 @@ py_test() {
   echo "===========Running Python test============"
-  for test_file in `find . -name '*test.py' -print`
+  for test_file in `find official/ -name '*test.py' -print`
   do
-    echo "Testing ${test_file}"
+    echo "####=======Testing ${test_file}=======####"
     ${PY_BINARY} "${test_file}" || exit_code=$?
   done