Commit 3b56ba8d authored by Hongkun Yu, committed by A. Unique TensorFlower

Remove TODOs that will never be fulfilled.

PiperOrigin-RevId: 320124801
parent 212105f3
@@ -88,7 +88,6 @@ def is_special_none_tensor(tensor):
   return tensor.shape.ndims == 0 and tensor.dtype == tf.int32


-# TODO(hongkuny): consider moving custom string-map lookup to keras api.
 def get_activation(identifier):
   """Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
...
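Note: the removed TODO asked about moving the custom string-map lookup into the Keras API. Keras already exposes a string-to-callable resolver; below is a minimal sketch of how a custom map can layer on top of it. The helper and map names are illustrative, not this module's actual `get_activation`:

```python
import tensorflow as tf

# Keras resolves string identifiers to activation callables directly.
relu_fn = tf.keras.activations.get("relu")  # -> tf.keras.activations.relu

# A module-level string map for activations Keras does not know about
# (erf-based gelu shown as an example).
_CUSTOM_ACTIVATIONS = {
    "gelu": lambda x: 0.5 * x * (
        1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype))),
}

def lookup_activation(identifier):
  """Resolves an activation by name, checking the custom map first (hypothetical)."""
  if isinstance(identifier, str) and identifier in _CUSTOM_ACTIVATIONS:
    return _CUSTOM_ACTIVATIONS[identifier]
  return tf.keras.activations.get(identifier)
```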
@@ -559,7 +559,6 @@ def run_customized_training_loop(
     for metric in model.metrics:
       training_summary[metric.name] = _float_metric_value(metric)
     if eval_metrics:
-      # TODO(hongkuny): Cleans up summary reporting in text.
       training_summary['last_train_metrics'] = _float_metric_value(
           train_metrics[0])
       training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])
...
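Note: the removed TODO concerned cleaning up how this summary dict is reported as text. For context, a minimal sketch of the kind of text-summary writer such a `training_summary` dict typically feeds; the helper name and file path here are assumptions:

```python
import json
import os
import tensorflow as tf

def write_txt_summary(training_summary, summary_dir):
  """Writes the training summary dict to a JSON-formatted text file (sketch)."""
  summary_path = os.path.join(summary_dir, "training_summary.txt")
  with tf.io.gfile.GFile(summary_path, "w") as f:
    f.write(json.dumps(training_summary, indent=4))
```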
@@ -52,7 +52,6 @@ def create_model(params, is_train):
       logits = tf.keras.layers.Lambda(lambda x: x, name="logits",
                                       dtype=tf.float32)(logits)
       model = tf.keras.Model([inputs, targets], logits)
-      # TODO(reedwm): Can we do this loss in float16 instead of float32?
       loss = metrics.transformer_loss(
           logits, targets, label_smoothing, vocab_size)
       model.add_loss(loss)
@@ -238,7 +237,6 @@ class Transformer(tf.keras.Model):
     decoder_self_attention_bias = model_utils.get_decoder_self_attention_bias(
         max_decode_length, dtype=self.params["dtype"])
-    # TODO(b/139770046): Refactor code with better naming of i.
     def symbols_to_logits_fn(ids, i, cache):
       """Generate logits for next potential IDs.
...
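Note: the removed `TODO(reedwm)` asked whether the transformer loss could run in float16. The `Lambda(..., dtype=tf.float32)` line above it is the standard mixed-precision pattern: compute the model body in float16 but cast logits back to float32 before the loss. A minimal standalone sketch of that pattern using the current Keras mixed-precision API; the model and shapes are illustrative:

```python
import tensorflow as tf

# Compute in float16, keep variables (and the loss) in float32.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

inputs = tf.keras.Input(shape=(16,))
x = tf.keras.layers.Dense(32, activation="relu")(inputs)  # float16 compute
logits = tf.keras.layers.Dense(10)(x)                     # float16 output
# Identity Lambda with dtype=tf.float32 casts logits up so the
# softmax/cross-entropy is numerically stable.
logits = tf.keras.layers.Lambda(lambda t: t, name="logits",
                                dtype=tf.float32)(logits)
model = tf.keras.Model(inputs, logits)
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
```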
@@ -248,7 +248,6 @@ class TransformerTask(object):
       callbacks = [cb for cb in callbacks
                    if isinstance(cb, keras_utils.TimeHistory)]
-      # TODO(b/139418525): Refactor the custom training loop logic.
       @tf.function
       def train_steps(iterator, steps):
         """Training steps function for TPU runs.
@@ -422,8 +421,6 @@
     """Loads model weights when it is provided."""
     if init_weight_path:
       logging.info("Load weights: {}".format(init_weight_path))
-      # TODO(b/139414977): Having the same variable restoring method for both
-      # TPU and GPU.
       if self.use_tpu:
         checkpoint = tf.train.Checkpoint(
             model=model, optimizer=self._create_optimizer())
...
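Note: the two removed TODO lines tracked b/139414977, unifying the TPU and GPU weight-restoring paths shown above. For reference, a minimal sketch of the two mechanisms side by side; the function and argument names are illustrative, not the actual method:

```python
import tensorflow as tf

def load_weights_if_possible(model, optimizer, init_weight_path, use_tpu):
  """Sketch of the dual restore path this code keeps (names illustrative)."""
  if not init_weight_path:
    return
  if use_tpu:
    # Object-based restore: matches variables through the object graph and
    # defers assignments until variables are created, which suits TPU setup.
    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    checkpoint.restore(init_weight_path)
  else:
    # Keras-level restore: accepts a TF checkpoint prefix or an HDF5 file.
    model.load_weights(init_weight_path)
```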