Commit 983b7d08 authored by Evan Kepner's avatar Evan Kepner Committed by Evan Kepner
Browse files

correct authorship

parent 692b7526
...@@ -162,11 +162,21 @@ class PTBModel(object): ...@@ -162,11 +162,21 @@ class PTBModel(object):
"softmax_w", [size, vocab_size], dtype=data_type()) "softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type()) softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
logits = tf.matmul(output, softmax_w) + softmax_b logits = tf.matmul(output, softmax_w) + softmax_b
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[logits], # Reshape logits to be 3-D tensor for sequence loss
[tf.reshape(input_.targets, [-1])], logits = tf.reshape(logits, [batch_size, num_steps, vocab_size])
[tf.ones([batch_size * num_steps], dtype=data_type())])
self._cost = cost = tf.reduce_sum(loss) / batch_size # use the contrib sequence loss and average over the batches
loss = tf.contrib.seq2seq.sequence_loss(
logits,
input_.targets,
tf.ones([batch_size, num_steps], dtype=data_type()),
average_across_timesteps=False,
average_across_batch=True
)
# update the cost variables
self._cost = cost = tf.reduce_sum(loss)
self._final_state = state self._final_state = state
if not is_training: if not is_training:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment