Unverified commit 47a59023, authored by Toby Boyd, committed by GitHub
Browse files

Fix unit tests failures. (#7086)

parent 513fdbb2
......@@ -22,6 +22,7 @@ import tensorflow.contrib.eager as tfe # pylint: disable=g-bad-import-order
from official.mnist import mnist
from official.mnist import mnist_eager
from official.utils.misc import keras_utils
def device():
......@@ -62,6 +63,11 @@ def evaluate(defun=False):
class MNISTTest(tf.test.TestCase):
"""Run tests for MNIST eager loop."""
def setUp(self):
    """Ensure each test runs with TF 2.x behavior enabled."""
    # Only flip the compat switch when the installed TF is not already 2.0
    # (per keras_utils.is_v2_0); enabling it twice is unnecessary.
    if not keras_utils.is_v2_0():
        tf.compat.v1.enable_v2_behavior()
    super(MNISTTest, self).setUp()
def test_train(self):
    """Smoke-test the eager training loop with defun disabled."""
    train(defun=False)
......@@ -76,5 +82,4 @@ class MNISTTest(tf.test.TestCase):
if __name__ == "__main__":
    # Turn on eager execution, then hand control to the TF test runner.
    tfe.enable_eager_execution()
    tf.test.main()
......@@ -24,8 +24,8 @@ import tensorflow as tf
from official.resnet import imagenet_main
from official.resnet.ctl import ctl_imagenet_main
from official.resnet.ctl import ctl_common
from official.utils.misc import keras_utils
from official.utils.testing import integration
from official.resnet.keras import keras_common
# pylint: disable=ungrouped-imports
from tensorflow.python.eager import context
from tensorflow.python.platform import googletest
......@@ -54,6 +54,8 @@ class CtlImagenetTest(googletest.TestCase):
def setUp(self):
    """Enable TF 2.x behavior and shrink the validation split for speed."""
    super(CtlImagenetTest, self).setUp()
    # Flip the v2-behavior switch only under a TF 1.x build.
    if not keras_utils.is_v2_0():
        tf.compat.v1.enable_v2_behavior()
    # Use a 4-image validation set so the end-to-end test stays small.
    imagenet_main.NUM_IMAGES['validation'] = 4
def tearDown(self):
......@@ -64,9 +66,9 @@ class CtlImagenetTest(googletest.TestCase):
"""Test Keras model with 1 GPU, no distribution strategy."""
extra_flags = [
"-distribution_strategy", "off",
"-model_dir", "ctl_imagenet_no_dist_strat",
"-data_format", "channels_last",
'-distribution_strategy', 'off',
'-model_dir', 'ctl_imagenet_no_dist_strat',
'-data_format', 'channels_last',
]
extra_flags = extra_flags + self._extra_flags
......@@ -78,15 +80,15 @@ class CtlImagenetTest(googletest.TestCase):
def test_end_to_end_2_gpu(self):
"""Test Keras model with 2 GPUs."""
num_gpus = "2"
num_gpus = '2'
if context.num_gpus() < 2:
num_gpus = "0"
num_gpus = '0'
extra_flags = [
"-num_gpus", num_gpus,
"-distribution_strategy", "default",
"-model_dir", "ctl_imagenet_2_gpu",
"-data_format", "channels_last",
'-num_gpus', num_gpus,
'-distribution_strategy', 'default',
'-model_dir', 'ctl_imagenet_2_gpu',
'-data_format', 'channels_last',
]
extra_flags = extra_flags + self._extra_flags
......@@ -97,6 +99,4 @@ class CtlImagenetTest(googletest.TestCase):
)
if __name__ == '__main__':
    # Post-commit entry point: v2-behavior setup moved into setUp(), so the
    # guard only launches the test runner. (The diff's deleted
    # keras_common.is_v2_0()/tf.enable_v2_behavior() lines are dropped.)
    googletest.main()
......@@ -20,44 +20,54 @@ from __future__ import print_function
import os
import re
import unittest
from absl import flags
from absl.testing import flagsaver
import tensorflow as tf
from official.transformer.v2 import misc
from official.transformer.v2 import transformer_main as tm
FLAGS = flags.FLAGS
FIXED_TIMESTAMP = "my_time_stamp"
WEIGHT_PATTERN = re.compile(r"weights-epoch-.+\.hdf5")
FIXED_TIMESTAMP = 'my_time_stamp'
WEIGHT_PATTERN = re.compile(r'weights-epoch-.+\.hdf5')
def _generate_file(filepath, lines):
    """Write each element of `lines` to `filepath`, one per line.

    Args:
      filepath: Destination path; the file is created or truncated.
      lines: Iterable of values formatted with str.format and newline-joined.
    """
    # The scraped diff contained both the old (double-quoted) and new
    # (single-quoted) open/write lines; keep only the post-commit version so
    # the file is opened once and each line written once.
    with open(filepath, 'w') as f:
        for l in lines:
            f.write('{}\n'.format(l))
class TransformerTaskTest(tf.test.TestCase):
local_flags = None
def setUp(self):
    """Reset flags to a tiny synthetic-data config before each test.

    Defines the transformer flags once per process (cached in
    TransformerTaskTest.local_flags) and restores that saved snapshot on
    subsequent tests, then points all paths at a fresh temp dir.
    """
    # Deduplicated to the post-commit version: the scraped diff carried both
    # the old double-quoted and new single-quoted assignments.
    temp_dir = self.get_temp_dir()
    if TransformerTaskTest.local_flags is None:
        misc.define_transformer_flags()
        # Loads flags, array cannot be blank.
        flags.FLAGS(['foo'])
        TransformerTaskTest.local_flags = flagsaver.save_flag_values()
    else:
        flagsaver.restore_flag_values(TransformerTaskTest.local_flags)
    FLAGS.model_dir = os.path.join(temp_dir, FIXED_TIMESTAMP)
    FLAGS.param_set = 'tiny'
    FLAGS.use_synthetic_data = True
    FLAGS.steps_between_evals = 1
    FLAGS.train_steps = 2
    FLAGS.validation_steps = 1
    FLAGS.batch_size = 8
    FLAGS.num_gpus = 1
    FLAGS.distribution_strategy = 'off'
    FLAGS.dtype = 'fp32'
    self.model_dir = FLAGS.model_dir
    self.temp_dir = temp_dir
    self.vocab_file = os.path.join(temp_dir, 'vocab')
    self.vocab_size = misc.get_model_params(FLAGS.param_set, 0)['vocab_size']
    self.bleu_source = os.path.join(temp_dir, 'bleu_source')
    self.bleu_ref = os.path.join(temp_dir, 'bleu_ref')
    # Remembered so tearDown (outside this view) can restore the Keras
    # mixed-precision policy — TODO confirm against tearDown.
    self.orig_policy = tf.keras.mixed_precision.experimental.global_policy()
def tearDown(self):
......@@ -75,23 +85,26 @@ class TransformerTaskTest(tf.test.TestCase):
t = tm.TransformerTask(FLAGS)
t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_1_gpu_with_dist_strat(self):
    """Train with the one_device distribution strategy (GPU-only)."""
    # Post-commit version only; the scrape also carried the old
    # double-quoted assignment as a duplicate line.
    FLAGS.distribution_strategy = 'one_device'
    t = tm.TransformerTask(FLAGS)
    t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_2_gpu(self):
    """Train the base model on two GPUs with MirroredStrategy."""
    # Deduplicated to the post-commit single-quoted assignments.
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.num_gpus = 2
    FLAGS.param_set = 'base'
    t = tm.TransformerTask(FLAGS)
    t.train()
@unittest.skipUnless(tf.test.is_built_with_cuda(), 'requires GPU')
def test_train_2_gpu_fp16(self):
    """Train the base model on two GPUs in fp16."""
    # Deduplicated to the post-commit single-quoted assignments.
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.num_gpus = 2
    FLAGS.param_set = 'base'
    FLAGS.dtype = 'fp16'
    t = tm.TransformerTask(FLAGS)
    t.train()
......@@ -107,15 +120,15 @@ class TransformerTaskTest(tf.test.TestCase):
]
tokens += ["'{}'".format(i) for i in range(self.vocab_size - len(tokens))]
_generate_file(self.vocab_file, tokens)
_generate_file(self.bleu_source, ["a b", "c d"])
_generate_file(self.bleu_ref, ["a b", "d c"])
_generate_file(self.bleu_source, ['a b', 'c d'])
_generate_file(self.bleu_ref, ['a b', 'd c'])
# Update flags.
update_flags = [
"ignored_program_name",
"--vocab_file={}".format(self.vocab_file),
"--bleu_source={}".format(self.bleu_source),
"--bleu_ref={}".format(self.bleu_ref),
'ignored_program_name',
'--vocab_file={}'.format(self.vocab_file),
'--bleu_source={}'.format(self.bleu_source),
'--bleu_ref={}'.format(self.bleu_ref),
]
if extra_flags:
update_flags.extend(extra_flags)
......@@ -127,7 +140,7 @@ class TransformerTaskTest(tf.test.TestCase):
t.predict()
def test_predict_fp16(self):
    """Run predict with dtype forced to fp16 via the flag parser."""
    # Post-commit version only; the old double-quoted flag string was a
    # duplicate diff line in the scrape.
    self._prepare_files_and_flags('--dtype=fp16')
    t = tm.TransformerTask(FLAGS)
    t.predict()
......@@ -137,6 +150,5 @@ class TransformerTaskTest(tf.test.TestCase):
t.eval()
if __name__ == '__main__':
    # Post-commit guard: flag definition moved into setUp(), so the removed
    # misc.define_transformer_flags() call is dropped here.
    tf.test.main()
......@@ -14,7 +14,7 @@
# limitations under the License.
# ==============================================================================
# Presubmit script that run tests and lint under local environment.
# Presubmit script that runs tests and lint under local environment.
# Make sure that tensorflow and pylint is installed.
# usage: models >: ./official/utils/testing/scripts/presubmit.sh
# usage: models >: ./official/utils/testing/scripts/presubmit.sh lint py2_test py3_test
......@@ -26,16 +26,14 @@ MODEL_ROOT="$(pwd)"
export PYTHONPATH="$PYTHONPATH:${MODEL_ROOT}"
cd official
lint() {
local exit_code=0
RC_FILE="utils/testing/pylint.rcfile"
RC_FILE="official/utils/testing/pylint.rcfile"
PROTO_SKIP="DO\sNOT\sEDIT!"
echo "===========Running lint test============"
for file in `find . -name '*.py' ! -name '*test.py' -print`
for file in `find official/ -name '*.py' ! -name '*test.py' -print`
do
if grep ${PROTO_SKIP} ${file}; then
echo "Linting ${file} (Skipped: Machine generated file)"
......@@ -46,7 +44,7 @@ lint() {
done
# More lenient for test files.
for file in `find . -name '*test.py' -print`
for file in `find official/ -name '*test.py' -print`
do
echo "Linting ${file}"
pylint --rcfile="${RC_FILE}" --disable=missing-docstring,protected-access "${file}" || exit_code=$?
......@@ -61,9 +59,9 @@ py_test() {
echo "===========Running Python test============"
for test_file in `find . -name '*test.py' -print`
for test_file in `find official/ -name '*test.py' -print`
do
echo "Testing ${test_file}"
echo "####=======Testing ${test_file}=======####"
${PY_BINARY} "${test_file}" || exit_code=$?
done
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment