Commit 4ddec8e0 authored by Neal Wu, committed by GitHub
Browse files

Merge pull request #935 from tensorflow/update-translate-model

Fix regressions caused by a previous change
parents e9e470d4 f63a80ae
...@@ -239,8 +239,8 @@ def data_to_token_ids(data_path, target_path, vocabulary_path, ...@@ -239,8 +239,8 @@ def data_to_token_ids(data_path, target_path, vocabulary_path,
counter += 1 counter += 1
if counter % 100000 == 0: if counter % 100000 == 0:
print(" tokenizing line %d" % counter) print(" tokenizing line %d" % counter)
token_ids = sentence_to_token_ids(line, vocab, tokenizer, token_ids = sentence_to_token_ids(tf.compat.as_bytes(line), vocab,
normalize_digits) tokenizer, normalize_digits)
tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n") tokens_file.write(" ".join([str(tok) for tok in token_ids]) + "\n")
......
...@@ -108,24 +108,29 @@ class Seq2SeqModel(object): ...@@ -108,24 +108,29 @@ class Seq2SeqModel(object):
local_b = tf.cast(b, tf.float32) local_b = tf.cast(b, tf.float32)
local_inputs = tf.cast(inputs, tf.float32) local_inputs = tf.cast(inputs, tf.float32)
return tf.cast( return tf.cast(
tf.nn.sampled_softmax_loss(local_w_t, local_b, local_inputs, labels, tf.nn.sampled_softmax_loss(
num_samples, self.target_vocab_size), weights=local_w_t,
biases=local_b,
labels=labels,
inputs=local_inputs,
num_sampled=num_samples,
num_classes=self.target_vocab_size),
dtype) dtype)
softmax_loss_function = sampled_loss softmax_loss_function = sampled_loss
# Create the internal multi-layer cell for our RNN. # Create the internal multi-layer cell for our RNN.
def single_cell(): def single_cell():
return tf.nn.rnn_cell.GRUCell(size) return tf.contrib.rnn.GRUCell(size)
if use_lstm: if use_lstm:
def single_cell(): def single_cell():
return tf.nn.rnn_cell.BasicLSTMCell(size) return tf.contrib.rnn.BasicLSTMCell(size)
cell = single_cell() cell = single_cell()
if num_layers > 1: if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(num_layers)]) cell = tf.contrib.rnn.MultiRNNCell([single_cell() for _ in range(num_layers)])
# The seq2seq function: we use embedding for the input and attention. # The seq2seq function: we use embedding for the input and attention.
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode): def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
return tf.nn.seq2seq.embedding_attention_seq2seq( return tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
encoder_inputs, encoder_inputs,
decoder_inputs, decoder_inputs,
cell, cell,
...@@ -155,7 +160,7 @@ class Seq2SeqModel(object): ...@@ -155,7 +160,7 @@ class Seq2SeqModel(object):
# Training outputs and losses. # Training outputs and losses.
if forward_only: if forward_only:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets, self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True), self.target_weights, buckets, lambda x, y: seq2seq_f(x, y, True),
softmax_loss_function=softmax_loss_function) softmax_loss_function=softmax_loss_function)
...@@ -167,7 +172,7 @@ class Seq2SeqModel(object): ...@@ -167,7 +172,7 @@ class Seq2SeqModel(object):
for output in self.outputs[b] for output in self.outputs[b]
] ]
else: else:
self.outputs, self.losses = tf.nn.seq2seq.model_with_buckets( self.outputs, self.losses = tf.contrib.legacy_seq2seq.model_with_buckets(
self.encoder_inputs, self.decoder_inputs, targets, self.encoder_inputs, self.decoder_inputs, targets,
self.target_weights, buckets, self.target_weights, buckets,
lambda x, y: seq2seq_f(x, y, False), lambda x, y: seq2seq_f(x, y, False),
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment