Commit 71c6a697 authored by guptapriya's avatar guptapriya Committed by guptapriya
Browse files

Clean up unused flags etc

parent 7b6c8999
......@@ -105,7 +105,6 @@ def parse_flags(flags_obj):
"match_mlperf": flags_obj.ml_perf,
"use_xla_for_gpu": flags_obj.use_xla_for_gpu,
"epochs_between_evals": FLAGS.epochs_between_evals,
"turn_off_distribution_strategy": FLAGS.turn_off_distribution_strategy,
"keras_use_ctl": flags_obj.keras_use_ctl,
"hr_threshold": flags_obj.hr_threshold,
}
......@@ -113,9 +112,6 @@ def parse_flags(flags_obj):
def get_distribution_strategy(params):
"""Returns the distribution strategy to use."""
if params["turn_off_distribution_strategy"]:
return None
if params["use_tpu"]:
# Some of the networking libraries are quite chatty.
for name in ["googleapiclient.discovery", "googleapiclient.discovery_cache",
......@@ -292,12 +288,6 @@ def define_ncf_flags():
name="seed", default=None, help=flags_core.help_wrap(
"This value will be used to seed both NumPy and TensorFlow."))
flags.DEFINE_boolean(
name="turn_off_distribution_strategy",
default=False,
help=flags_core.help_wrap(
"If set, do not use any distribution strategy."))
@flags.validator("eval_batch_size", "eval_batch_size must be at least {}"
.format(rconst.NUM_EVAL_NEGATIVES + 1))
def eval_size_check(eval_batch_size):
......
......@@ -88,6 +88,7 @@ def _get_train_and_eval_data(producer, params):
fake_dup_mask = tf.zeros_like(features[movielens.USER_COLUMN])
features[rconst.DUPLICATE_MASK] = fake_dup_mask
features[rconst.TRAIN_LABEL_KEY] = labels
#return (features,)
return features, labels
train_input_fn = producer.make_input_fn(is_training=True)
......@@ -110,6 +111,7 @@ def _get_train_and_eval_data(producer, params):
tf.zeros_like(features[movielens.USER_COLUMN]), tf.bool)
features[rconst.VALID_POINT_MASK] = fake_valit_pt_mask
features[rconst.TRAIN_LABEL_KEY] = labels
#return (features,)
return features, labels
eval_input_fn = producer.make_input_fn(is_training=False)
......@@ -233,8 +235,8 @@ def _get_keras_model(params):
from_logits=True,
reduction="sum")
loss_scale_factor = (batch_size *
tf.distribute.get_strategy().num_replicas_in_sync)
loss_scale_factor = (batch_size) #*
#tf.distribute.get_strategy().num_replicas_in_sync)
keras_model.add_loss(loss_obj(
y_true=label_input,
y_pred=softmax_logits,
......@@ -297,7 +299,9 @@ def run_ncf(_):
"val_metric_fn", desired_value=FLAGS.hr_threshold)
callbacks.append(early_stopping_callback)
strategy = ncf_common.get_distribution_strategy(params)
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus)
with distribution_utils.get_strategy_scope(strategy):
keras_model = _get_keras_model(params)
optimizer = tf.keras.optimizers.Adam(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment