Commit 3e93722a authored by Neal Wu's avatar Neal Wu Committed by GitHub
Browse files

Merge branch 'master' into master

parents 2335c9fc 4de34a4c
......@@ -42,7 +42,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
......@@ -263,9 +263,9 @@ class Word2Vec(object):
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
labels=tf.ones_like(true_logits), logits=true_logits)
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
......
......@@ -41,7 +41,7 @@ from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
......
......@@ -23,7 +23,7 @@ import os
import tensorflow as tf
from tensorflow.models.embedding import word2vec_optimized
import word2vec_optimized
flags = tf.app.flags
......
......@@ -23,7 +23,7 @@ import os
import tensorflow as tf
from tensorflow.models.embedding import word2vec
import word2vec
flags = tf.app.flags
......
......@@ -17,7 +17,7 @@
To run, use:
bazel run -c opt --config=cuda \
third_party/tensorflow/models/image/alexnet:alexnet_benchmark
models/tutorials/image/alexnet:alexnet_benchmark
Across 100 steps on batch size = 128.
......
......@@ -18,5 +18,5 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.models.image.cifar10 import cifar10
from tensorflow.models.image.cifar10 import cifar10_input
import cifar10
import cifar10_input
......@@ -35,7 +35,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import re
import sys
......@@ -44,7 +43,7 @@ import tarfile
from six.moves import urllib
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
import cifar10_input
FLAGS = tf.app.flags.FLAGS
......@@ -287,7 +286,7 @@ def loss(logits, labels):
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, labels, name='cross_entropy_per_example')
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
......
......@@ -41,7 +41,7 @@ import time
import numpy as np
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
import cifar10
FLAGS = tf.app.flags.FLAGS
......
......@@ -242,6 +242,10 @@ def inputs(eval_data, data_dir, batch_size):
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, 3])
read_input.label.set_shape([1])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(num_examples_per_epoch *
......
......@@ -23,7 +23,7 @@ import os
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10_input
import cifar10_input
class CIFAR10InputTest(tf.test.TestCase):
......
......@@ -47,7 +47,7 @@ import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
import cifar10
FLAGS = tf.app.flags.FLAGS
......
......@@ -41,7 +41,7 @@ import time
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
import cifar10
FLAGS = tf.app.flags.FLAGS
......
......@@ -228,7 +228,7 @@ def main(_):
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, train_labels_node))
labels=train_labels_node, logits=logits))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
......
......@@ -2,8 +2,8 @@ This directory contains functions for creating recurrent neural networks
and sequence-to-sequence models. Detailed instructions on how to get started
and use them are available in the tutorials.
* [RNN Tutorial](http://tensorflow.org/tutorials/recurrent/index.md)
* [Sequence-to-Sequence Tutorial](http://tensorflow.org/tutorials/seq2seq/index.md)
* [RNN Tutorial](http://tensorflow.org/tutorials/recurrent/)
* [Sequence-to-Sequence Tutorial](http://tensorflow.org/tutorials/seq2seq/)
Here is a short overview of what is in this directory.
......
......@@ -18,4 +18,4 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.models.rnn.ptb import reader
import reader
......@@ -61,7 +61,7 @@ import time
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
import reader
flags = tf.flags
logging = tf.logging
......@@ -126,7 +126,7 @@ class PTBModel(object):
if is_training and config.keep_prob < 1:
inputs = tf.nn.dropout(inputs, config.keep_prob)
# Simplified version of tensorflow.models.rnn.rnn.py's rnn().
# Simplified version of models/tutorials/rnn/rnn.py's rnn().
# This builds an unrolled LSTM for tutorial purposes only.
# In general, use the rnn() or state_saving_rnn() from rnn.py.
#
......
......@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.models.ptb_lstm.ptb_reader."""
"""Tests for models.tutorials.rnn.ptb.reader."""
from __future__ import absolute_import
from __future__ import division
......@@ -23,7 +23,7 @@ import os.path
import tensorflow as tf
from tensorflow.models.rnn.ptb import reader
import reader
class PtbReaderTest(tf.test.TestCase):
......
......@@ -18,5 +18,5 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.models.rnn.translate import data_utils
from tensorflow.models.rnn.translate import seq2seq_model
import data_utils
import seq2seq_model
......@@ -177,7 +177,7 @@ def initialize_vocabulary(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
......
......@@ -25,7 +25,7 @@ import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.rnn.translate import data_utils
import data_utils
class Seq2SeqModel(object):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment