"scripts/vscode:/vscode.git/clone" did not exist on "99ec439da476c1a83ce29863395433833f0ac850"
Commit 2c6f12e3 authored by Hongkun Yu, committed by A. Unique TensorFlower

Remove utils/logs usage for official models.

PiperOrigin-RevId: 308451074
parent 562e1978
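Every file below gets the same treatment: the official.utils.logs imports (logger, mlperf_helper) are dropped and the calls that used them are unwrapped or deleted. A minimal before/after sketch of the entry-point pattern, using a stand-in run() and a single hypothetical --model_dir flag (the real scripts define many more):

from absl import app
from absl import flags

flags.DEFINE_string('model_dir', '/tmp/model', 'Where to write checkpoints.')
FLAGS = flags.FLAGS


def run(flags_obj):
  # Stand-in for each model's existing training entry point.
  return {'model_dir': flags_obj.model_dir}


# Before this commit the body was:
#   with logger.benchmark_context(flags.FLAGS):
#     return run(flags.FLAGS)
def main(_):
  return run(FLAGS)


if __name__ == '__main__':
  app.run(main)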
@@ -27,7 +27,6 @@ from official.benchmark.models import cifar_preprocessing
 from official.benchmark.models import resnet_cifar_model
 from official.benchmark.models import synthetic_util
 from official.utils.flags import core as flags_core
-from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.vision.image_classification.resnet import common
@@ -277,8 +276,7 @@ def define_cifar_flags():

 def main(_):
-  with logger.benchmark_context(flags.FLAGS):
-    return run(flags.FLAGS)
+  return run(flags.FLAGS)


 if __name__ == '__main__':
...
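For context on what is being unwrapped: logger.benchmark_context(flags_obj) was only ever used as a with-statement around the run call. A generic sketch of that shape, illustrative only and not the removed official.utils.logs.logger implementation:

import contextlib
import logging


@contextlib.contextmanager
def benchmark_context(flags_obj):
  # Illustrative only: do benchmark-logging setup from the parsed flags,
  # hand control to the training run, then tear down on the way out.
  del flags_obj  # A real implementation would read benchmark flags here.
  logging.info('benchmark context: start')
  try:
    yield
  finally:
    logging.info('benchmark context: end')

With the wrapper gone, main() simply returns run(flags.FLAGS), which is the whole change in this file.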
@@ -28,7 +28,6 @@ import tensorflow as tf
 import tensorflow_model_optimization as tfmot
 from official.modeling import performance
 from official.utils.flags import core as flags_core
-from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.utils.misc import model_helpers
@@ -294,8 +293,7 @@ def define_imagenet_keras_flags():

 def main(_):
   model_helpers.apply_clean(flags.FLAGS)
-  with logger.benchmark_context(flags.FLAGS):
-    stats = run(flags.FLAGS)
+  stats = run(flags.FLAGS)
   logging.info('Run stats:\n%s', stats)
...
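model_helpers.apply_clean(flags.FLAGS) is untouched and still runs first. As a hedged guess at what that helper does, based only on its name and the standard --clean/--model_dir flags these scripts expose (not on anything in this diff), it removes the model directory before training when a clean run is requested:

import tensorflow as tf


def apply_clean(flags_obj):
  # Hypothetical sketch of the helper's behavior, not its actual source.
  if getattr(flags_obj, 'clean', False) and tf.io.gfile.exists(flags_obj.model_dir):
    tf.io.gfile.rmtree(flags_obj.model_dir)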
@@ -39,7 +39,6 @@ from official.nlp.transformer import transformer
 from official.nlp.transformer import translate
 from official.nlp.transformer.utils import tokenizer
 from official.utils.flags import core as flags_core
-from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
@@ -471,25 +470,24 @@ def _ensure_dir(log_dir):
 def main(_):
   flags_obj = flags.FLAGS
-  with logger.benchmark_context(flags_obj):
-    task = TransformerTask(flags_obj)
+  task = TransformerTask(flags_obj)

-    # Execute flag override logic for better model performance
-    if flags_obj.tf_gpu_thread_mode:
-      keras_utils.set_gpu_thread_mode_and_count(
-          per_gpu_thread_count=flags_obj.per_gpu_thread_count,
-          gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
-          num_gpus=flags_obj.num_gpus,
-          datasets_num_private_threads=flags_obj.datasets_num_private_threads)
+  # Execute flag override logic for better model performance
+  if flags_obj.tf_gpu_thread_mode:
+    keras_utils.set_gpu_thread_mode_and_count(
+        per_gpu_thread_count=flags_obj.per_gpu_thread_count,
+        gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
+        num_gpus=flags_obj.num_gpus,
+        datasets_num_private_threads=flags_obj.datasets_num_private_threads)

-    if flags_obj.mode == "train":
-      task.train()
-    elif flags_obj.mode == "predict":
-      task.predict()
-    elif flags_obj.mode == "eval":
-      task.eval()
-    else:
-      raise ValueError("Invalid mode {}".format(flags_obj.mode))
+  if flags_obj.mode == "train":
+    task.train()
+  elif flags_obj.mode == "predict":
+    task.predict()
+  elif flags_obj.mode == "eval":
+    task.eval()
+  else:
+    raise ValueError("Invalid mode {}".format(flags_obj.mode))


 if __name__ == "__main__":
...
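Only the indentation of the GPU-thread tuning block changes; the call itself is identical. As an assumption about what a helper with this signature typically does (not a copy of keras_utils.set_gpu_thread_mode_and_count, and returning the computed tf.data thread count rather than writing it back into the flags):

import multiprocessing
import os


def set_gpu_thread_mode_and_count(gpu_thread_mode, per_gpu_thread_count,
                                  num_gpus, datasets_num_private_threads):
  # Export the GPU thread settings through the environment variables
  # TensorFlow reads at startup.
  per_gpu_thread_count = per_gpu_thread_count or 2
  os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)

  if not datasets_num_private_threads:
    # Leave whatever the GPU threads do not claim to the tf.data pipeline.
    cpu_count = multiprocessing.cpu_count()
    datasets_num_private_threads = max(
        cpu_count - per_gpu_thread_count * num_gpus - num_gpus, 1)
  return datasets_num_private_threads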
@@ -22,19 +22,17 @@ import os
 import pickle
 import time
 import timeit
-import typing

 # pylint: disable=wrong-import-order
+from absl import logging
 import numpy as np
 import pandas as pd
 import tensorflow as tf
-from absl import logging
+import typing
 # pylint: enable=wrong-import-order

 from official.recommendation import constants as rconst
 from official.recommendation import data_pipeline
 from official.recommendation import movielens
-from official.utils.logs import mlperf_helper
-

 DATASET_TO_NUM_USERS_AND_ITEMS = {
@@ -126,9 +124,6 @@ def _filter_index_sort(raw_rating_path, cache_path):
   num_users = len(original_users)
   num_items = len(original_items)

-  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.PREPROC_HP_NUM_EVAL,
-                          value=rconst.NUM_EVAL_NEGATIVES)
-
   assert num_users <= np.iinfo(rconst.USER_DTYPE).max
   assert num_items <= np.iinfo(rconst.ITEM_DTYPE).max
   assert df[movielens.USER_COLUMN].max() == num_users - 1
...
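The assertions that remain bound the id spaces by the storage dtypes via np.iinfo. A standalone illustration, with np.int32 and np.uint16 as stand-ins for rconst.USER_DTYPE and rconst.ITEM_DTYPE (which may differ) and made-up counts:

import numpy as np

USER_DTYPE, ITEM_DTYPE = np.int32, np.uint16  # stand-ins for the rconst dtypes
num_users, num_items = 138_000, 27_000        # example dataset sizes

# Ids are 0-based, so the largest id must fit in the chosen integer dtype.
assert num_users <= np.iinfo(USER_DTYPE).max
assert num_items <= np.iinfo(ITEM_DTYPE).max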
@@ -37,8 +37,6 @@ from official.recommendation import movielens
 from official.recommendation import ncf_common
 from official.recommendation import ncf_input_pipeline
 from official.recommendation import neumf_model
-from official.utils.logs import logger
-from official.utils.logs import mlperf_helper
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.utils.misc import model_helpers
@@ -551,10 +549,7 @@ def build_stats(loss, eval_result, time_callback):

 def main(_):
-  with logger.benchmark_context(FLAGS), \
-      mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
-    mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
-    run_ncf(FLAGS)
+  run_ncf(FLAGS)


 if __name__ == "__main__":
...
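This is the largest simplification: the MLPerf compliance-logging wrapper and the set_ncf_root bookkeeping disappear, leaving run_ncf(FLAGS) on its own. The removed mlperf_helper.LOGGER(...) was used as a flag-gated context manager; a generic sketch of that shape (illustrative only, not the removed helper):

import contextlib
import logging


@contextlib.contextmanager
def compliance_logging(enabled):
  # Illustrative only: emit extra compliance-style log lines when the
  # output_ml_perf_compliance_logging flag is on, and stay quiet otherwise.
  if enabled:
    logging.info('compliance logging: run start')
  try:
    yield
  finally:
    if enabled:
      logging.info('compliance logging: run end')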
@@ -37,12 +37,10 @@ import sys
 from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf

 from official.recommendation import constants as rconst
 from official.recommendation import movielens
 from official.recommendation import ncf_common
 from official.recommendation import stat_utils
-from official.utils.logs import mlperf_helper
-


 def sparse_to_dense_grads(grads_and_vars):
@@ -99,16 +97,6 @@ def neumf_model_fn(features, labels, mode, params):
     labels = tf.cast(labels, tf.int32)
     valid_pt_mask = features[rconst.VALID_POINT_MASK]

-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.OPT_NAME, value="adam")
-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.OPT_LR,
-                            value=params["learning_rate"])
-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.OPT_HP_ADAM_BETA1,
-                            value=params["beta1"])
-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.OPT_HP_ADAM_BETA2,
-                            value=params["beta2"])
-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.OPT_HP_ADAM_EPSILON,
-                            value=params["epsilon"])
-
     optimizer = tf.compat.v1.train.AdamOptimizer(
         learning_rate=params["learning_rate"],
         beta1=params["beta1"],
@@ -117,9 +105,6 @@ def neumf_model_fn(features, labels, mode, params):
     if params["use_tpu"]:
       optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)

-    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.MODEL_HP_LOSS_FN,
-                            value=mlperf_helper.TAGS.BCE)
-
     loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(
         labels=labels,
         logits=softmax_logits,
@@ -171,10 +156,6 @@ def construct_model(user_input, item_input, params):
   mf_dim = params["mf_dim"]

-  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.MODEL_HP_MF_DIM, value=mf_dim)
-  mlperf_helper.ncf_print(key=mlperf_helper.TAGS.MODEL_HP_MLP_LAYER_SIZES,
-                          value=model_layers)
-
   if model_layers[0] % 2 != 0:
     raise ValueError("The first layer size should be multiple of 2!")
...
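With the ncf_print hyperparameter tags gone, what remains is plain TF1-style Adam, optionally wrapped for TPU. A self-contained version of just that fragment, with placeholder hyperparameter values standing in for the ones the NCF flags supply:

import tensorflow as tf

# Placeholder values; the real ones come from the parsed NCF flags.
params = {"learning_rate": 1e-3, "beta1": 0.9, "beta2": 0.999,
          "epsilon": 1e-8, "use_tpu": False}

optimizer = tf.compat.v1.train.AdamOptimizer(
    learning_rate=params["learning_rate"],
    beta1=params["beta1"],
    beta2=params["beta2"],
    epsilon=params["epsilon"])
if params["use_tpu"]:
  # On TPU, gradients are aggregated across shards before applying updates.
  optimizer = tf.compat.v1.tpu.CrossShardOptimizer(optimizer)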
@@ -27,7 +27,6 @@ import tensorflow as tf
 from official.modeling import performance
 from official.staging.training import controller
 from official.utils.flags import core as flags_core
-from official.utils.logs import logger
 from official.utils.misc import distribution_utils
 from official.utils.misc import keras_utils
 from official.utils.misc import model_helpers
@@ -182,8 +181,7 @@ def run(flags_obj):

 def main(_):
   model_helpers.apply_clean(flags.FLAGS)
-  with logger.benchmark_context(flags.FLAGS):
-    stats = run(flags.FLAGS)
+  stats = run(flags.FLAGS)
   logging.info('Run stats:\n%s', stats)
...