Commit 79d2ecb1 authored by Neal Wu

Revert "fixed a bug in sampled_loss(), made compatible for 0.12.0"

parent 3607bf48
@@ -100,7 +100,7 @@ class Seq2SeqModel(object):
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)

-      def sampled_loss(inputs, labels):
+      def sampled_loss(labels, inputs):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
 ...
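Context (not part of the commit): the line under dispute is the sampled-softmax loss closure in seq2seq_model.py, and the disagreement is over whether the closure (and tf.nn.sampled_softmax_loss) takes inputs before labels, as in TensorFlow releases up to 0.12, or labels before inputs, the order adopted around the 1.0 release. The sketch below is a hedged illustration of a closure that tolerates both orders; the illustrative sizes and the 1.0 version cutoff are assumptions, not the repository's code.

import tensorflow as tf

# Minimal sketch, not the repository's code: a sampled-softmax loss closure that
# handles both argument orders of tf.nn.sampled_softmax_loss. The cutoff at the
# 1.0 release is an assumption; verify against the installed TF's documentation.
size, target_vocab_size, num_samples = 1024, 40000, 512  # illustrative values
dtype = tf.float32

w = tf.get_variable("proj_w", [size, target_vocab_size], dtype=dtype)
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [target_vocab_size], dtype=dtype)
output_projection = (w, b)

def sampled_loss(inputs, labels):
    # sampled_softmax_loss expects labels of shape [batch_size, num_true].
    labels = tf.reshape(labels, [-1, 1])
    # Compute the loss in 32-bit floats to avoid numerical instabilities,
    # as the comment in the hunk above notes, then cast back to the model dtype.
    local_w_t = tf.cast(w_t, tf.float32)
    local_b = tf.cast(b, tf.float32)
    local_inputs = tf.cast(inputs, tf.float32)
    if tf.__version__.startswith("0."):
        # Pre-1.0 order: inputs before labels.
        loss = tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels,
                                          num_samples, target_vocab_size)
    else:
        # 1.0-era order: labels before inputs.
        loss = tf.nn.sampled_softmax_loss(local_w_t, local_b, labels, local_inputs,
                                          num_samples, target_vocab_size)
    return tf.cast(loss, dtype)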