ModelZoo / ResNet50_tensorflow

Commit 3e93722a
Authored Jan 13, 2017 by Neal Wu; committed by GitHub on Jan 13, 2017

    Merge branch 'master' into master

Parents: 2335c9fc, 4de34a4c
Changes: 61 files
Showing 20 of the changed files, with 31 additions and 28 deletions (+31 -28) on this page; the remaining diffs are on later pages of the commit view.
tutorials/embedding/word2vec.py                     +3 -3
tutorials/embedding/word2vec_optimized.py           +1 -1
tutorials/embedding/word2vec_optimized_test.py      +1 -1
tutorials/embedding/word2vec_test.py                +1 -1
tutorials/image/alexnet/alexnet_benchmark.py        +1 -1
tutorials/image/cifar10/__init__.py                 +2 -2
tutorials/image/cifar10/cifar10.py                  +2 -3
tutorials/image/cifar10/cifar10_eval.py             +1 -1
tutorials/image/cifar10/cifar10_input.py            +4 -0
tutorials/image/cifar10/cifar10_input_test.py       +1 -1
tutorials/image/cifar10/cifar10_multi_gpu_train.py  +1 -1
tutorials/image/cifar10/cifar10_train.py            +1 -1
tutorials/image/mnist/convolutional.py              +1 -1
tutorials/rnn/README.md                             +2 -2
tutorials/rnn/ptb/__init__.py                       +1 -1
tutorials/rnn/ptb/ptb_word_lm.py                    +2 -2
tutorials/rnn/ptb/reader_test.py                    +2 -2
tutorials/rnn/translate/__init__.py                 +2 -2
tutorials/rnn/translate/data_utils.py               +1 -1
tutorials/rnn/translate/seq2seq_model.py            +1 -1
tutorials/embedding/word2vec.py

@@ -42,7 +42,7 @@ from six.moves import xrange  # pylint: disable=redefined-builtin
 import numpy as np
 import tensorflow as tf

-from tensorflow.models.embedding import gen_word2vec as word2vec
+word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

 flags = tf.app.flags

@@ -263,9 +263,9 @@ class Word2Vec(object):
     #   cross-entropy(logits, labels)
     opts = self._options
     true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        true_logits, tf.ones_like(true_logits))
+        labels=tf.ones_like(true_logits), logits=true_logits)
     sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
-        sampled_logits, tf.zeros_like(sampled_logits))
+        labels=tf.zeros_like(sampled_logits), logits=sampled_logits)

     # NCE-loss is the sum of the true and noise (sampled words)
     # contributions, averaged over the batch.
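Note: the second hunk reflects the TensorFlow 1.0 API, in which tf.nn.sigmoid_cross_entropy_with_logits only accepts labels= and logits= as keyword arguments. A minimal standalone sketch of the new call form, with toy logits invented purely for illustration:

import tensorflow as tf

# Toy logits standing in for the word2vec true/sampled scores (illustrative values only).
true_logits = tf.constant([2.0, -1.0, 0.5])
sampled_logits = tf.constant([[0.3, -0.7], [1.2, 0.1], [-0.4, 0.9]])

# TF 1.0 form: labels and logits must be passed by keyword.
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(true_logits), logits=true_logits)
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(sampled_logits), logits=sampled_logits)

# NCE-style loss: true and sampled contributions summed, averaged over the batch.
nce_loss = (tf.reduce_sum(true_xent) + tf.reduce_sum(sampled_xent)) / tf.cast(
    tf.shape(true_logits)[0], tf.float32)

with tf.Session() as sess:
  print(sess.run(nce_loss))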
tutorials/embedding/word2vec_optimized.py

@@ -41,7 +41,7 @@ from six.moves import xrange  # pylint: disable=redefined-builtin
 import numpy as np
 import tensorflow as tf

-from tensorflow.models.embedding import gen_word2vec as word2vec
+word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

 flags = tf.app.flags
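For reference, tf.load_op_library loads a compiled custom-op shared library at runtime instead of relying on ops generated inside the TensorFlow source tree. A minimal sketch of the pattern used in these two files, assuming word2vec_ops.so has already been built (e.g. following the compile step in the tutorial's README) and sits next to the script:

import os

import tensorflow as tf

# Load the custom word2vec kernels from a shared library in the script's directory.
# The path expression mirrors the diff above; the .so must exist at that location.
word2vec = tf.load_op_library(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))

# The module object returned by load_op_library exposes the ops defined in the
# library as Python functions (the tutorial's skip-gram ops, in this case).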
tutorials/embedding/word2vec_optimized_test.py

@@ -23,7 +23,7 @@ import os
 import tensorflow as tf

-from tensorflow.models.embedding import word2vec_optimized
+import word2vec_optimized

 flags = tf.app.flags
tutorials/embedding/word2vec_test.py

@@ -23,7 +23,7 @@ import os
 import tensorflow as tf

-from tensorflow.models.embedding import word2vec
+import word2vec

 flags = tf.app.flags
tutorials/image/alexnet/alexnet_benchmark.py

@@ -17,7 +17,7 @@
 To run, use:
   bazel run -c opt --config=cuda \
-      third_party/tensorflow/models/image/alexnet:alexnet_benchmark
+      models/tutorials/image/alexnet:alexnet_benchmark

 Across 100 steps on batch size = 128.
tutorials/image/cifar10/__init__.py

@@ -18,5 +18,5 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.models.image.cifar10 import cifar10
-from tensorflow.models.image.cifar10 import cifar10_input
+import cifar10
+import cifar10_input
tutorials/image/cifar10/cifar10.py

@@ -35,7 +35,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import gzip
 import os
 import re
 import sys

@@ -44,7 +43,7 @@ import tarfile
 from six.moves import urllib
 import tensorflow as tf

-from tensorflow.models.image.cifar10 import cifar10_input
+import cifar10_input

 FLAGS = tf.app.flags.FLAGS

@@ -287,7 +286,7 @@ def loss(logits, labels):
   # Calculate the average cross entropy loss across the batch.
   labels = tf.cast(labels, tf.int64)
   cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, labels, name='cross_entropy_per_example')
+      labels=labels, logits=logits, name='cross_entropy_per_example')
   cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
   tf.add_to_collection('losses', cross_entropy_mean)
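As with the sigmoid variant earlier, tf.nn.sparse_softmax_cross_entropy_with_logits takes keyword-only labels= and logits= arguments in TensorFlow 1.0. A minimal sketch with toy values (invented for illustration) showing the loss computation in the new form:

import tensorflow as tf

# Toy batch: 3 examples, 10 classes; integer class labels and unnormalized logits.
logits = tf.random_normal([3, 10])
labels = tf.constant([2, 7, 4], dtype=tf.int64)

# TF 1.0 keyword form, matching the diff above.
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

with tf.Session() as sess:
  print(sess.run(cross_entropy_mean))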
tutorials/image/cifar10/cifar10_eval.py

@@ -41,7 +41,7 @@ import time
 import numpy as np
 import tensorflow as tf

-from tensorflow.models.image.cifar10 import cifar10
+import cifar10

 FLAGS = tf.app.flags.FLAGS
tutorials/image/cifar10/cifar10_input.py

@@ -242,6 +242,10 @@ def inputs(eval_data, data_dir, batch_size):
   # Subtract off the mean and divide by the variance of the pixels.
   float_image = tf.image.per_image_standardization(resized_image)

+  # Set the shapes of tensors.
+  float_image.set_shape([height, width, 3])
+  read_input.label.set_shape([1])
+
   # Ensure that the random shuffling has good mixing properties.
   min_fraction_of_examples_in_queue = 0.4
   min_queue_examples = int(num_examples_per_epoch *
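The four added lines give the evaluation input pipeline explicit static shape information. Tensor.set_shape only annotates the graph's shape inference (it does not reshape any data), and downstream ops such as the batching queues rely on those static shapes. A small sketch of the idea, with dimensions assumed for illustration:

import tensorflow as tf

height, width = 24, 24  # CIFAR-10 eval crop size used by the tutorial

# A tensor whose static shape is unknown (e.g. after decoding and cropping).
image = tf.placeholder(tf.float32, shape=None)
print(image.get_shape())  # <unknown>

# set_shape adds static shape information without changing the underlying data;
# it raises an error if the annotation is incompatible with what is already known.
image.set_shape([height, width, 3])
print(image.get_shape())  # (24, 24, 3)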
tutorials/image/cifar10/cifar10_input_test.py

@@ -23,7 +23,7 @@ import os
 import tensorflow as tf

-from tensorflow.models.image.cifar10 import cifar10_input
+import cifar10_input


 class CIFAR10InputTest(tf.test.TestCase):
tutorials/image/cifar10/cifar10_multi_gpu_train.py

@@ -47,7 +47,7 @@ import time
 import numpy as np
 from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf

-from tensorflow.models.image.cifar10 import cifar10
+import cifar10

 FLAGS = tf.app.flags.FLAGS
tutorials/image/cifar10/cifar10_train.py

@@ -41,7 +41,7 @@ import time
 import tensorflow as tf

-from tensorflow.models.image.cifar10 import cifar10
+import cifar10

 FLAGS = tf.app.flags.FLAGS
tutorials/image/mnist/convolutional.py

@@ -228,7 +228,7 @@ def main(_):
   # Training computation: logits + cross-entropy loss.
   logits = model(train_data_node, True)
   loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
-      logits, train_labels_node))
+      labels=train_labels_node, logits=logits))

   # L2 regularization for the fully connected parameters.
   regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tutorials/rnn/README.md

@@ -2,8 +2,8 @@ This directory contains functions for creating recurrent neural networks
 and sequence-to-sequence models. Detailed instructions on how to get started
 and use them are available in the tutorials.

-* [RNN Tutorial](http://tensorflow.org/tutorials/recurrent/index.md)
-* [Sequence-to-Sequence Tutorial](http://tensorflow.org/tutorials/seq2seq/index.md)
+* [RNN Tutorial](http://tensorflow.org/tutorials/recurrent/)
+* [Sequence-to-Sequence Tutorial](http://tensorflow.org/tutorials/seq2seq/)

 Here is a short overview of what is in this directory.
tutorials/rnn/ptb/__init__.py

@@ -18,4 +18,4 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.models.rnn.ptb import reader
+import reader
tutorials/rnn/ptb/ptb_word_lm.py

@@ -61,7 +61,7 @@ import time
 import numpy as np
 import tensorflow as tf

-from tensorflow.models.rnn.ptb import reader
+import reader

 flags = tf.flags
 logging = tf.logging

@@ -126,7 +126,7 @@ class PTBModel(object):
     if is_training and config.keep_prob < 1:
       inputs = tf.nn.dropout(inputs, config.keep_prob)

-    # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
+    # Simplified version of models/tutorials/rnn/rnn.py's rnn().
     # This builds an unrolled LSTM for tutorial purposes only.
     # In general, use the rnn() or state_saving_rnn() from rnn.py.
     #
tutorials/rnn/ptb/reader_test.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 # ==============================================================================

-"""Tests for tensorflow.models.ptb_lstm.ptb_reader."""
+"""Tests for models.tutorials.rnn.ptb.reader."""

 from __future__ import absolute_import
 from __future__ import division

@@ -23,7 +23,7 @@ import os.path
 import tensorflow as tf

-from tensorflow.models.rnn.ptb import reader
+import reader


 class PtbReaderTest(tf.test.TestCase):
tutorials/rnn/translate/__init__.py

@@ -18,5 +18,5 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.models.rnn.translate import data_utils
-from tensorflow.models.rnn.translate import seq2seq_model
+import data_utils
+import seq2seq_model
tutorials/rnn/translate/data_utils.py

@@ -177,7 +177,7 @@ def initialize_vocabulary(vocabulary_path):
     rev_vocab = []
     with gfile.GFile(vocabulary_path, mode="rb") as f:
       rev_vocab.extend(f.readlines())
-    rev_vocab = [line.strip() for line in rev_vocab]
+    rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]
     vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
     return vocab, rev_vocab
  else:
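The tf.compat.as_bytes wrapper keeps the vocabulary lookup consistent between Python 2 and Python 3: GFile opened in "rb" mode yields bytes, and as_bytes guarantees that keys built elsewhere from str values are converted to the same type before the dict lookup. A small sketch (illustrative strings only):

import tensorflow as tf

# as_bytes is a no-op on bytes and UTF-8-encodes unicode/str input.
print(tf.compat.as_bytes(b'hello'))  # b'hello'
print(tf.compat.as_bytes(u'hello'))  # b'hello'

# Building the vocabulary with byte keys keeps lookups consistent across
# Python 2 and 3, mirroring the diff above.
rev_vocab = [b'_PAD', b'_GO', b'_EOS', b'_UNK', b'the']
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
print(vocab[tf.compat.as_bytes('the')])  # 4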
tutorials/rnn/translate/seq2seq_model.py

@@ -25,7 +25,7 @@ import numpy as np
 from six.moves import xrange  # pylint: disable=redefined-builtin
 import tensorflow as tf

-from tensorflow.models.rnn.translate import data_utils
+import data_utils


 class Seq2SeqModel(object):