Commit 3043566d authored by ayushmankumar7

tf.compat.v1.logging replaced with absl logging

parent 1e2ceffd
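Every file touched below applies the same mechanical substitution: calls routed through the deprecated tf.compat.v1.logging shim are rewritten against absl's logging module (adding "from absl import logging" where it was not already imported), which provides equivalent severity levels and printf-style lazy formatting. A minimal sketch of the pattern, assuming absl-py is installed; the message text and values are illustrative, not taken from this commit:

    # Before: logging went through the TF 1.x compatibility shim.
    #   tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    #   tf.compat.v1.logging.info('Training for %d epochs.', 10)
    # After: log directly via absl.
    from absl import logging

    logging.set_verbosity(logging.INFO)
    logging.info('Training for %d epochs.', 10)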
@@ -20,6 +20,7 @@ from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from official.benchmark.models import resnet_cifar_model
@@ -100,7 +101,7 @@ class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
@@ -280,6 +281,6 @@ def main(_):
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
define_cifar_flags()
app.run(main)
@@ -23,6 +23,7 @@ import time
from absl import flags
from absl.testing import flagsaver
from absl import logging
import tensorflow as tf
from official.recommendation import ncf_common
@@ -51,7 +52,7 @@ class NCFKerasBenchmarkBase(tf.test.Benchmark):
def _setup(self):
"""Sets up and resets flags before each test."""
assert tf.version.VERSION.startswith('2.')
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
if NCFKerasBenchmarkBase.local_flags is None:
ncf_common.define_ncf_flags()
# Loads flags to get defaults to then override. List cannot be empty.
@@ -22,6 +22,7 @@ import os
from absl.testing import parameterized
from absl.testing.absltest import mock
from absl import logging
import numpy as np
import tensorflow as tf
@@ -125,7 +126,7 @@ def summaries_with_matching_keyword(keyword, summary_dir):
if event.summary is not None:
for value in event.summary.value:
if keyword in value.tag:
tf.compat.v1.logging.error(event)
logging.error(event)
yield event.summary
@@ -20,7 +20,7 @@ from __future__ import print_function
import numpy as np
import tensorflow as tf
from absl import logging
from official.nlp.transformer.utils import tokenizer
_EXTRA_DECODE_LENGTH = 100
@@ -117,7 +117,7 @@ def translate_file(model,
maxlen=params["decode_max_length"],
dtype="int32",
padding="post")
tf.compat.v1.logging.info("Decoding batch %d out of %d.", i,
logging.info("Decoding batch %d out of %d.", i,
num_decode_batches)
yield batch
@@ -172,7 +172,7 @@ def translate_file(model,
translation = _trim_and_decode(val_outputs[j], subtokenizer)
translations.append(translation)
if print_all_translations:
tf.compat.v1.logging.info(
logging.info(
"Translating:\n\tInput: %s\n\tOutput: %s" %
(sorted_inputs[j + i * batch_size], translation))
@@ -181,7 +181,7 @@ def translate_file(model,
if tf.io.gfile.isdir(output_file):
raise ValueError("File output is a directory, will not save outputs to "
"file.")
tf.compat.v1.logging.info("Writing to file %s" % output_file)
logging.info("Writing to file %s" % output_file)
with tf.compat.v1.gfile.Open(output_file, "w") as f:
for i in sorted_keys:
f.write("%s\n" % translations[i])
@@ -191,10 +191,10 @@ def translate_from_text(model, subtokenizer, txt):
encoded_txt = _encode_and_add_eos(txt, subtokenizer)
result = model.predict(encoded_txt)
outputs = result["outputs"]
tf.compat.v1.logging.info("Original: \"%s\"" % txt)
logging.info("Original: \"%s\"" % txt)
translate_from_input(outputs, subtokenizer)
def translate_from_input(outputs, subtokenizer):
translation = _trim_and_decode(outputs, subtokenizer)
tf.compat.v1.logging.info("Translation: \"%s\"" % translation)
logging.info("Translation: \"%s\"" % translation)
@@ -22,6 +22,7 @@ import collections
import re
import sys
import unicodedata
from absl import logging
import numpy as np
import six
@@ -63,7 +64,7 @@ class Subtokenizer(object):
def __init__(self, vocab_file, reserved_tokens=None):
"""Initializes class, creating a vocab file if data_files is provided."""
tf.compat.v1.logging.info("Initializing Subtokenizer from file %s." %
logging.info("Initializing Subtokenizer from file %s." %
vocab_file)
if reserved_tokens is None:
@@ -109,15 +110,15 @@ class Subtokenizer(object):
reserved_tokens = RESERVED_TOKENS
if tf.io.gfile.exists(vocab_file):
tf.compat.v1.logging.info("Vocab file already exists (%s)" % vocab_file)
logging.info("Vocab file already exists (%s)" % vocab_file)
else:
tf.compat.v1.logging.info("Begin steps to create subtoken vocabulary...")
logging.info("Begin steps to create subtoken vocabulary...")
token_counts = _count_tokens(files, file_byte_limit, correct_strip)
alphabet = _generate_alphabet_dict(token_counts)
subtoken_list = _generate_subtokens_with_target_vocab_size(
token_counts, alphabet, target_vocab_size, threshold, min_count,
reserved_tokens)
tf.compat.v1.logging.info("Generated vocabulary with %d subtokens." %
logging.info("Generated vocabulary with %d subtokens." %
len(subtoken_list))
_save_vocab_file(vocab_file, subtoken_list)
return Subtokenizer(vocab_file)
@@ -402,7 +403,7 @@ def _generate_subtokens_with_target_vocab_size(
reserved_tokens = RESERVED_TOKENS
if min_count is not None:
tf.compat.v1.logging.info(
logging.info(
"Using min_count=%d to generate vocab with target size %d" %
(min_count, target_size))
return _generate_subtokens(
@@ -411,13 +412,13 @@ def _generate_subtokens_with_target_vocab_size(
def bisect(min_val, max_val):
"""Recursive function to binary search for subtoken vocabulary."""
cur_count = (min_val + max_val) // 2
tf.compat.v1.logging.info("Binary search: trying min_count=%d (%d %d)" %
logging.info("Binary search: trying min_count=%d (%d %d)" %
(cur_count, min_val, max_val))
subtoken_list = _generate_subtokens(
token_counts, alphabet, cur_count, reserved_tokens=reserved_tokens)
val = len(subtoken_list)
tf.compat.v1.logging.info(
logging.info(
"Binary search: min_count=%d resulted in %d tokens" % (cur_count, val))
within_threshold = abs(val - target_size) < threshold
@@ -434,7 +435,7 @@ def _generate_subtokens_with_target_vocab_size(
return other_subtoken_list
return subtoken_list
tf.compat.v1.logging.info("Finding best min_count to get target size of %d" %
logging.info("Finding best min_count to get target size of %d" %
target_size)
return bisect(_MIN_MIN_COUNT, _MAX_MIN_COUNT)
@@ -603,7 +604,7 @@ def _generate_subtokens(
# subtoken_dict, count how often the resulting subtokens appear, and update
# the dictionary with subtokens w/ high enough counts.
for i in xrange(num_iterations):
tf.compat.v1.logging.info("\tGenerating subtokens: iteration %d" % i)
logging.info("\tGenerating subtokens: iteration %d" % i)
# Generate new subtoken->id dictionary using the new subtoken list.
subtoken_dict = _list_to_index_dict(subtoken_list)
@@ -616,5 +617,5 @@ def _generate_subtokens(
subtoken_list, max_subtoken_length = _gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
tf.compat.v1.logging.info("\tVocab size: %d" % len(subtoken_list))
logging.info("\tVocab size: %d" % len(subtoken_list))
return subtoken_list
@@ -17,6 +17,7 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from absl import app as absl_app
from absl import flags
from six.moves import range
@@ -243,6 +244,6 @@ def main(_):
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
define_mnist_flags()
absl_app.run(main)
@@ -21,7 +21,7 @@ import time
import unittest
import tensorflow as tf # pylint: disable=g-bad-import-order
from absl import logging
from official.r1.mnist import mnist
from official.utils.misc import keras_utils
@@ -143,5 +143,5 @@ class Benchmarks(tf.test.Benchmark):
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity(logging.ERROR)
tf.test.main()
@@ -20,6 +20,7 @@ from __future__ import print_function
import os
from absl import logging
from absl import app as absl_app
from absl import flags
from six.moves import range
@@ -139,7 +140,7 @@ def input_fn(is_training,
dataset = tf.data.FixedLengthRecordDataset(filenames, _RECORD_BYTES)
if input_context:
tf.compat.v1.logging.info(
logging.info(
'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
input_context.input_pipeline_id, input_context.num_input_pipelines))
dataset = dataset.shard(input_context.num_input_pipelines,
@@ -270,7 +271,7 @@ def run_cifar(flags_obj):
Dictionary of results. Including final accuracy.
"""
if flags_obj.image_bytes_as_serving_input:
tf.compat.v1.logging.fatal(
logging.fatal(
'--image_bytes_as_serving_input cannot be set to True for CIFAR. '
'This flag is only applicable to ImageNet.')
return
@@ -291,6 +292,6 @@ def main(_):
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
define_cifar_flags()
absl_app.run(main)
@@ -21,12 +21,13 @@ from tempfile import mkstemp
import numpy as np
import tensorflow as tf # pylint: disable=g-bad-import-order
from absl import logging
from official.r1.resnet import cifar10_main
from official.utils.misc import keras_utils
from official.utils.testing import integration
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity(logging.ERROR)
_BATCH_SIZE = 128
_HEIGHT = 32
@@ -21,6 +21,7 @@ import os
import time
from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf # pylint: disable=g-bad-import-order
@@ -56,7 +57,7 @@ class EstimatorBenchmark(tf.test.Benchmark):
def _setup(self):
"""Sets up and resets flags before each test."""
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
if EstimatorBenchmark.local_flags is None:
for flag_method in self.flag_methods:
flag_method()
@@ -22,6 +22,7 @@ import os
from absl import app as absl_app
from absl import flags
from absl import logging
from six.moves import range
import tensorflow as tf
@@ -194,7 +195,7 @@ def input_fn(is_training,
dataset = tf.data.Dataset.from_tensor_slices(filenames)
if input_context:
tf.compat.v1.logging.info(
logging.info(
'Sharding the dataset: input_pipeline_id=%d num_input_pipelines=%d' % (
input_context.input_pipeline_id, input_context.num_input_pipelines))
dataset = dataset.shard(input_context.num_input_pipelines,
@@ -387,6 +388,6 @@ def main(_):
if __name__ == '__main__':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
logging.set_verbosity(logging.INFO)
define_imagenet_flags()
absl_app.run(main)
@@ -25,7 +25,7 @@ from official.r1.resnet import imagenet_main
from official.utils.misc import keras_utils
from official.utils.testing import integration
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity(logging.ERROR)
_BATCH_SIZE = 32
_LABEL_CLASSES = 1001
@@ -28,6 +28,7 @@ import math
import multiprocessing
import os
from absl import logging
from absl import flags
import tensorflow as tf
@@ -83,7 +84,7 @@ def process_record_dataset(dataset,
options.experimental_threading.private_threadpool_size = (
datasets_num_private_threads)
dataset = dataset.with_options(options)
tf.compat.v1.logging.info('datasets_num_private_threads: %s',
logging.info('datasets_num_private_threads: %s',
datasets_num_private_threads)
# Disable intra-op parallelism to optimize for throughput instead of latency.
@@ -205,16 +206,16 @@ def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
what has been set by the user on the command-line.
"""
cpu_count = multiprocessing.cpu_count()
tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)
logging.info('Logical CPU cores: %s', cpu_count)
# Sets up thread pool for each GPU for op scheduling.
per_gpu_thread_count = 1
total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Reduces general thread pool by number of threads used for GPU pool.
@@ -648,7 +649,7 @@ def resnet_main(
hooks=train_hooks,
max_steps=flags_obj.max_train_steps)
eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval)
tf.compat.v1.logging.info('Starting to train and evaluate.')
logging.info('Starting to train and evaluate.')
tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
# tf.estimator.train_and_evalute doesn't return anything in multi-worker
# case.
@@ -671,7 +672,7 @@ def resnet_main(
schedule[-1] = train_epochs - sum(schedule[:-1]) # over counting.
for cycle_index, num_train_epochs in enumerate(schedule):
tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
logging.info('Starting cycle: %d/%d', cycle_index,
int(n_loops))
if num_train_epochs:
@@ -691,7 +692,7 @@ def resnet_main(
# allows the eval (which is generally unimportant in those circumstances)
# to terminate. Note that eval will run for max_train_steps each loop,
# regardless of the global_step count.
tf.compat.v1.logging.info('Starting to evaluate.')
logging.info('Starting to evaluate.')
eval_results = classifier.evaluate(input_fn=input_fn_eval,
steps=flags_obj.max_train_steps)
@@ -27,6 +27,7 @@ import uuid
import numpy as np
import six
from absl import logging
import tensorflow as tf
@@ -50,9 +51,9 @@ class _GarbageCollector(object):
for i in self.temp_buffers:
if tf.io.gfile.exists(i):
tf.io.gfile.remove(i)
tf.compat.v1.logging.info("Buffer file {} removed".format(i))
logging.info("Buffer file {} removed".format(i))
except Exception as e:
tf.compat.v1.logging.error("Failed to cleanup buffer files: {}".format(e))
logging.error("Failed to cleanup buffer files: {}".format(e))
_GARBAGE_COLLECTOR = _GarbageCollector()
@@ -176,7 +177,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
actual_size = tf.io.gfile.stat(buffer_path).length
if expected_size == actual_size:
return buffer_path
tf.compat.v1.logging.warning(
logging.warning(
"Existing buffer {} has size {}. Expected size {}. Deleting and "
"rebuilding buffer.".format(buffer_path, actual_size, expected_size))
tf.io.gfile.remove(buffer_path)
@@ -187,7 +188,7 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
tf.io.gfile.makedirs(os.path.split(buffer_path)[0])
tf.compat.v1.logging.info("Constructing TFRecordDataset buffer: {}"
logging.info("Constructing TFRecordDataset buffer: {}"
.format(buffer_path))
count = 0
@@ -198,10 +199,10 @@ def write_to_buffer(dataframe, buffer_path, columns, expected_size=None):
rows_per_core=_ROWS_PER_CORE):
_serialize_shards(df_shards, columns, pool, writer)
count += sum([len(s) for s in df_shards])
tf.compat.v1.logging.info("{}/{} examples written."
logging.info("{}/{} examples written."
.format(str(count).ljust(8), len(dataframe)))
finally:
pool.terminate()
tf.compat.v1.logging.info("Buffer write complete.")
logging.info("Buffer write complete.")
return buffer_path
@@ -21,13 +21,14 @@ import os
import unittest
import tensorflow as tf # pylint: disable=g-bad-import-order
from absl import logging
from official.utils.misc import keras_utils
from official.utils.testing import integration
from official.r1.wide_deep import census_dataset
from official.r1.wide_deep import census_main
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity(logging.ERROR)
TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
'Husband,zyx,wvu,34,56,78,tsr,<=50K')
@@ -28,8 +28,9 @@ from official.utils.misc import keras_utils
from official.utils.testing import integration
from official.r1.wide_deep import movielens_dataset
from official.r1.wide_deep import movielens_main
from absl import logging
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
logging.set_verbosity(logging.ERROR)
TEST_INPUT_VALUES = {
@@ -20,6 +20,7 @@ from __future__ import print_function
from absl import flags
import tensorflow as tf
from absl import logging
from official.utils.flags._conventions import help_wrap
@@ -39,7 +40,7 @@ def require_cloud_storage(flag_names):
valid_flags = True
for key in flag_names:
if not flag_values[key].startswith("gs://"):
tf.compat.v1.logging.error("{} must be a GCS path.".format(key))
logging.error("{} must be a GCS path.".format(key))
valid_flags = False
return valid_flags
@@ -25,6 +25,7 @@ from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from absl import logging
from official.utils.logs import hooks
from official.utils.logs import logger
@@ -57,7 +58,7 @@ def get_train_hooks(name_list, use_tpu=False, **kwargs):
return []
if use_tpu:
tf.compat.v1.logging.warning('hooks_helper received name_list `{}`, but a '
logging.warning('hooks_helper received name_list `{}`, but a '
'TPU is specified. No hooks will be used.'
.format(name_list))
return []
@@ -26,7 +26,7 @@ import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.logs import hooks
from official.utils.testing import mock_lib
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
logging.set_verbosity(logging.DEBUG)
class ExamplesPerSecondHookTest(tf.test.TestCase):
@@ -35,6 +35,7 @@ from six.moves import _thread as thread
from absl import flags
import tensorflow as tf
from tensorflow.python.client import device_lib
from absl import logging
from official.utils.logs import cloud_lib
@@ -119,7 +120,7 @@ class BaseBenchmarkLogger(object):
eval_results: dict, the result of evaluate.
"""
if not isinstance(eval_results, dict):
tf.compat.v1.logging.warning(
logging.warning(
"eval_results should be dictionary for logging. Got %s",
type(eval_results))
return
@@ -144,10 +145,10 @@ class BaseBenchmarkLogger(object):
"""
metric = _process_metric_to_json(name, value, unit, global_step, extras)
if metric:
tf.compat.v1.logging.info("Benchmark metric: %s", metric)
logging.info("Benchmark metric: %s", metric)
def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
tf.compat.v1.logging.info(
logging.info(
"Benchmark run: %s", _gather_run_info(model_name, dataset_name,
run_params, test_id))
@@ -187,7 +188,7 @@ class BenchmarkFileLogger(BaseBenchmarkLogger):
self._metric_file_handler.write("\n")
self._metric_file_handler.flush()
except (TypeError, ValueError) as e:
tf.compat.v1.logging.warning(
logging.warning(
"Failed to dump metric to log file: name %s, value %s, error %s",
name, value, e)
@@ -212,7 +213,7 @@ class BenchmarkFileLogger(BaseBenchmarkLogger):
json.dump(run_info, f)
f.write("\n")
except (TypeError, ValueError) as e:
tf.compat.v1.logging.warning(
logging.warning(
"Failed to dump benchmark run info to log file: %s", e)
def on_finish(self, status):
@@ -322,7 +323,7 @@ def _process_metric_to_json(
name, value, unit=None, global_step=None, extras=None):
"""Validate the metric data and generate JSON for insert."""
if not isinstance(value, numbers.Number):
tf.compat.v1.logging.warning(
logging.warning(
"Metric value to log should be a number. Got %s", type(value))
return None
@@ -383,7 +384,7 @@ def _collect_cpu_info(run_info):
run_info["machine_config"]["cpu_info"] = cpu_info
except ImportError:
tf.compat.v1.logging.warn(
logging.warn(
"'cpuinfo' not imported. CPU info will not be logged.")
@@ -396,7 +397,7 @@ def _collect_memory_info(run_info):
run_info["machine_config"]["memory_total"] = vmem.total
run_info["machine_config"]["memory_available"] = vmem.available
except ImportError:
tf.compat.v1.logging.warn(
logging.warn(
"'psutil' not imported. Memory info will not be logged.")