"awq_cuda/git@developer.sourcefind.cn:OpenDAS/autoawq.git" did not exist on "2fa3a5d1a1ab2019321e898add3fcbe4898bf8cb"
Commit 5b0ef1fc authored by Nimit Nigania

Merge branch 'master' into ncf_f16

parents 1cba90f3 bf748370
@@ -26,7 +26,7 @@ import pandas as pd
 import tensorflow as tf
 # pylint: disable=g-bad-import-order
-from official.boosted_trees import train_higgs
+from official.r1.boosted_trees import train_higgs
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
@@ -133,7 +133,7 @@ class BaseTest(tf.test.TestCase):
             "--eval_start", "12",
             "--eval_count", "8",
         ],
-        synth=False, max_train=None)
+        synth=False)
     self.assertTrue(tf.gfile.Exists(os.path.join(model_dir, "checkpoint")))

   @unittest.skipIf(keras_utils.is_v2_0(), "TF 1.0 only test.")
@@ -152,7 +152,7 @@ class BaseTest(tf.test.TestCase):
             "--eval_start", "12",
             "--eval_count", "8",
         ],
-        synth=False, max_train=None)
+        synth=False)
     self.assertTrue(tf.gfile.Exists(os.path.join(model_dir, "checkpoint")))
     self.assertTrue(tf.gfile.Exists(os.path.join(export_dir)))
...
@@ -168,13 +168,15 @@ class BaseTest(tf.test.TestCase):
   def test_cifar10_end_to_end_synthetic_v1(self):
     integration.run_synthetic(
         main=cifar10_main.run_cifar, tmp_root=self.get_temp_dir(),
-        extra_flags=['-resnet_version', '1', '-batch_size', '4']
+        extra_flags=['-resnet_version', '1', '-batch_size', '4',
+                     '--max_train_steps', '1']
     )

   def test_cifar10_end_to_end_synthetic_v2(self):
     integration.run_synthetic(
         main=cifar10_main.run_cifar, tmp_root=self.get_temp_dir(),
-        extra_flags=['-resnet_version', '2', '-batch_size', '4']
+        extra_flags=['-resnet_version', '2', '-batch_size', '4',
+                     '--max_train_steps', '1']
     )
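The new '--max_train_steps', '1' entries cap each synthetic end-to-end test at a single training step, so a test still exercises flag parsing, the input pipeline, and checkpointing without a long run. A minimal sketch of how a run_synthetic-style helper could forward these flags; the helper body below is an assumption for illustration, not the actual official.utils.testing.integration API:

```python
# Minimal sketch of a run_synthetic-style test helper; the body is an
# assumption, not the repo's actual implementation.
from absl import flags as absl_flags


def run_synthetic_sketch(main, tmp_root, extra_flags=None):
  """Runs `main` once on synthetic data under a throwaway model dir."""
  argv = [
      'test',
      '--model_dir', tmp_root + '/model',
      '--use_synthetic_data',              # avoid any real dataset I/O
  ] + list(extra_flags or [])              # e.g. ['--max_train_steps', '1']
  absl_flags.FLAGS(argv)                   # parse argv into the global FLAGS
  main(absl_flags.FLAGS)
```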
...
@@ -282,41 +282,43 @@ class BaseTest(tf.test.TestCase):
   def test_imagenet_end_to_end_synthetic_v1(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
-        extra_flags=['-resnet_version', '1', '-batch_size', '4']
+        extra_flags=['-resnet_version', '1', '-batch_size', '4',
+                     '--max_train_steps', '1']
     )

   def test_imagenet_end_to_end_synthetic_v2(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
-        extra_flags=['-resnet_version', '2', '-batch_size', '4']
+        extra_flags=['-resnet_version', '2', '-batch_size', '4',
+                     '--max_train_steps', '1']
     )

   def test_imagenet_end_to_end_synthetic_v1_tiny(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
         extra_flags=['-resnet_version', '1', '-batch_size', '4',
-                     '-resnet_size', '18']
+                     '-resnet_size', '18', '--max_train_steps', '1']
     )

   def test_imagenet_end_to_end_synthetic_v2_tiny(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
         extra_flags=['-resnet_version', '2', '-batch_size', '4',
-                     '-resnet_size', '18']
+                     '-resnet_size', '18', '--max_train_steps', '1']
     )

   def test_imagenet_end_to_end_synthetic_v1_huge(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
         extra_flags=['-resnet_version', '1', '-batch_size', '4',
-                     '-resnet_size', '200']
+                     '-resnet_size', '200', '--max_train_steps', '1']
     )

   def test_imagenet_end_to_end_synthetic_v2_huge(self):
     integration.run_synthetic(
         main=imagenet_main.run_imagenet, tmp_root=self.get_temp_dir(),
         extra_flags=['-resnet_version', '2', '-batch_size', '4',
-                     '-resnet_size', '200']
+                     '-resnet_size', '200', '--max_train_steps', '1']
     )
...
@@ -33,7 +33,7 @@ import tensorflow as tf
 from official.r1.resnet import imagenet_preprocessing
 from official.r1.resnet import resnet_model
-from official.utils.export import export
+from official.r1.utils import export
 from official.utils.flags import core as flags_core
 from official.utils.logs import hooks_helper
 from official.utils.logs import logger
@@ -725,14 +725,18 @@ def define_resnet_flags(resnet_size_choices=None, dynamic_loss_scale=False,
   """Add flags and validators for ResNet."""
   flags_core.define_base()
   flags_core.define_performance(num_parallel_calls=False,
+                                inter_op=True,
+                                intra_op=True,
                                 tf_gpu_thread_mode=True,
                                 datasets_num_private_threads=True,
                                 dynamic_loss_scale=dynamic_loss_scale,
                                 fp16_implementation=fp16_implementation,
                                 loss_scale=True,
-                                tf_data_experimental_slack=True)
+                                tf_data_experimental_slack=True,
+                                max_train_steps=True)
   flags_core.define_image()
   flags_core.define_benchmark()
+  flags_core.define_distribution()
   flags.adopt_module_key_flags(flags_core)

   flags.DEFINE_enum(
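define_performance now takes explicit inter_op/intra_op opt-ins and an opt-in max_train_steps flag, and define_distribution() is registered with the other flag groups. A hedged sketch of the opt-in pattern and of how a training loop might honor the cap; both bodies are assumptions, only the flag and parameter names come from this hunk:

```python
from absl import flags


def define_performance_sketch(max_train_steps=False, **unused_kwargs):
  # Opt-in flag definition: callers request only the flags they need.
  if max_train_steps:
    flags.DEFINE_integer(
        name='max_train_steps', default=None,
        help='Upper bound on total training steps; None leaves it uncapped.')


def total_train_steps_sketch(flags_obj, steps_per_epoch, train_epochs):
  # Cap the schedule when --max_train_steps is set (e.g. to 1 in tests).
  steps = steps_per_epoch * train_epochs
  if flags_obj.max_train_steps is not None:
    steps = min(steps, flags_obj.max_train_steps)
  return steps
```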
@@ -768,16 +772,6 @@ def define_resnet_flags(resnet_size_choices=None, dynamic_loss_scale=False,
           'If True, uses `tf.estimator.train_and_evaluate` for the training '
           'and evaluation loop, instead of separate calls to `classifier.train '
           'and `classifier.evaluate`, which is the default behavior.'))
-  flags.DEFINE_string(
-      name='worker_hosts', default=None,
-      help=flags_core.help_wrap(
-          'Comma-separated list of worker ip:port pairs for running '
-          'multi-worker models with DistributionStrategy. The user would '
-          'start the program on each host with identical value for this flag.'))
-  flags.DEFINE_integer(
-      name='task_index', default=-1,
-      help=flags_core.help_wrap('If multi-worker training, the task_index of '
-                                'this worker.'))
   flags.DEFINE_bool(
       name='enable_lars', default=False,
       help=flags_core.help_wrap(
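The hand-rolled worker_hosts and task_index definitions are deleted because the shared flags_core.define_distribution() call added above now owns the distribution-related flags. A sketch of what such a helper plausibly centralizes, inferred from the two deleted definitions; the body is an assumption:

```python
from absl import flags


def define_distribution_sketch():
  # Inferred from the flags removed in this hunk; the real helper may differ.
  flags.DEFINE_string(
      name='worker_hosts', default=None,
      help='Comma-separated worker ip:port pairs for multi-worker training '
           'with DistributionStrategy; pass the same value on every host.')
  flags.DEFINE_integer(
      name='task_index', default=-1,
      help='Task index of this worker during multi-worker training.')
```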
...
@@ -20,7 +20,7 @@ from __future__ import print_function
 import tensorflow as tf  # pylint: disable=g-bad-import-order
-from official.utils.export import export
+from official.r1.utils import export

 class ExportUtilsTest(tf.test.TestCase):
...
 # Predicting Income with the Census Income Dataset
+Note that this implementation is based on TF 1.x.
+It is slated to move to the R1 archive folder.

 ## Overview
 The [Census Income Data Set](https://archive.ics.uci.edu/ml/datasets/Census+Income) contains over 48,000 samples with attributes including age, occupation, education, and income (a binary label, either `>50K` or `<=50K`). The dataset is split into roughly 32,000 training and 16,000 testing samples.
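As a quick sanity check on the dataset described above, the raw UCI "adult" CSV can be inspected with pandas; the column names below follow the standard UCI schema and the local file path is an assumption:

```python
import pandas as pd

# Column names follow the standard UCI "adult" schema (assumed here);
# 'adult.data' is the training split downloaded from the UCI repository.
COLUMNS = [
    'age', 'workclass', 'fnlwgt', 'education', 'education_num',
    'marital_status', 'occupation', 'relationship', 'race', 'gender',
    'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
    'income_bracket',
]

df = pd.read_csv('adult.data', names=COLUMNS, skipinitialspace=True)
print(len(df))                                # roughly 32,000 training rows
print(df['income_bracket'].value_counts())    # binary label: '>50K' vs '<=50K'
```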
...
@@ -22,8 +22,8 @@ import tensorflow as tf
 from official.utils.flags import core as flags_core
 from official.utils.logs import logger
-from official.wide_deep import census_dataset
-from official.wide_deep import wide_deep_run_loop
+from official.r1.wide_deep import census_dataset
+from official.r1.wide_deep import wide_deep_run_loop

 def define_census_flags():
...
@@ -24,8 +24,8 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
-from official.wide_deep import census_dataset
-from official.wide_deep import census_main
+from official.r1.wide_deep import census_dataset
+from official.r1.wide_deep import census_main

 tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
@@ -139,7 +139,7 @@ class BaseTest(tf.test.TestCase):
             '--model_type', 'wide',
             '--download_if_missing=false'
         ],
-        synth=False, max_train=None)
+        synth=False)

   @unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
   def test_end_to_end_deep(self):
@@ -150,7 +150,7 @@ class BaseTest(tf.test.TestCase):
             '--model_type', 'deep',
             '--download_if_missing=false'
         ],
-        synth=False, max_train=None)
+        synth=False)

   @unittest.skipIf(keras_utils.is_v2_0(), 'TF 1.0 only test.')
   def test_end_to_end_wide_deep(self):
@@ -161,7 +161,7 @@ class BaseTest(tf.test.TestCase):
             '--model_type', 'wide_deep',
             '--download_if_missing=false'
         ],
-        synth=False, max_train=None)
+        synth=False)

 if __name__ == '__main__':
...
@@ -27,8 +27,8 @@ import tensorflow as tf
 from official.datasets import movielens
 from official.utils.flags import core as flags_core
 from official.utils.logs import logger
-from official.wide_deep import movielens_dataset
-from official.wide_deep import wide_deep_run_loop
+from official.r1.wide_deep import movielens_dataset
+from official.r1.wide_deep import wide_deep_run_loop

 def define_movie_flags():
...
@@ -26,8 +26,8 @@ import tensorflow as tf  # pylint: disable=g-bad-import-order
 from official.datasets import movielens
 from official.utils.misc import keras_utils
 from official.utils.testing import integration
-from official.wide_deep import movielens_dataset
-from official.wide_deep import movielens_main
+from official.r1.wide_deep import movielens_dataset
+from official.r1.wide_deep import movielens_main

 tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
@@ -112,7 +112,7 @@ class BaseTest(tf.test.TestCase):
             "--train_epochs", "1",
             "--epochs_between_evals", "1"
         ],
-        synth=False, max_train=None)
+        synth=False)

 if __name__ == "__main__":
...