ModelZoo / ResNet50_tensorflow · Commits

Commit 296d4f65, authored Mar 23, 2017 by Ivan Bogatyy

Merge https://github.com/tensorflow/models

Parents: 9a463f1e, 277f99c7
Showing 4 changed files with 7 additions and 6 deletions (+7, -6):
- inception/inception/slim/README.md (+1, -1)
- textsum/seq2seq_attention_model.py (+3, -2)
- tutorials/rnn/ptb/ptb_word_lm.py (+1, -1)
- tutorials/rnn/translate/seq2seq_model.py (+2, -2)
inception/inception/slim/README.md
````diff
@@ -319,7 +319,7 @@ their use, consider the following example.
 def MyNewOp(inputs):
   varA = ...
   varB = ...
-  outputs = tf.mul(varA, inputs) + varB
+  outputs = tf.multiply(varA, inputs) + varB
   return outputs
 ```
````
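This README fix tracks the TensorFlow 1.0 API rename: tf.mul, tf.sub, and tf.neg were removed in favor of tf.multiply, tf.subtract, and tf.negative. A minimal runnable sketch of the renamed call; the variable initializers here are illustrative assumptions, since the README elides them with `...`:

```python
import tensorflow as tf

def my_new_op(inputs):
    # Illustrative stand-ins for the README's elided `...` initializers.
    var_a = tf.get_variable("varA", shape=[], initializer=tf.ones_initializer())
    var_b = tf.get_variable("varB", shape=[], initializer=tf.zeros_initializer())
    # tf.mul was removed in TensorFlow 1.0; tf.multiply is the replacement.
    return tf.multiply(var_a, inputs) + var_b
```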
textsum/seq2seq_attention_model.py
```diff
@@ -227,8 +227,9 @@ class Seq2SeqAttentionModel(object):
       def sampled_loss_func(inputs, labels):
         with tf.device('/cpu:0'):  # Try gpu.
           labels = tf.reshape(labels, [-1, 1])
-          return tf.nn.sampled_softmax_loss(w_t, v, inputs, labels,
-                                            hps.num_softmax_samples, vsize)
+          return tf.nn.sampled_softmax_loss(
+              weights=w_t, biases=v, labels=labels, inputs=inputs,
+              num_sampled=hps.num_softmax_samples, num_classes=vsize)

       if hps.num_softmax_samples != 0 and hps.mode == 'train':
         self._loss = seq2seq_lib.sampled_sequence_loss(
```
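The textsum change switches tf.nn.sampled_softmax_loss to keyword arguments. This matters because TensorFlow 1.0 swapped the positional order of `inputs` and `labels` in this function, so an old positional call would silently pass tensors into the wrong slots. A minimal sketch of the keyword-argument form; the shapes and sizes below are assumptions for illustration, not values from the repo:

```python
import tensorflow as tf

# Illustrative sizes (assumptions for the sketch).
vsize, dim, batch, num_samples = 10000, 128, 32, 64

w_t = tf.get_variable("w_t", [vsize, dim])          # transposed output weights
v = tf.get_variable("v", [vsize])                   # output biases
inputs = tf.placeholder(tf.float32, [batch, dim])   # decoder states
labels = tf.placeholder(tf.int64, [batch, 1])       # target word ids

# Keyword arguments make the call unambiguous: TF 1.0 swapped the positional
# order of `inputs` and `labels` relative to earlier releases.
loss = tf.nn.sampled_softmax_loss(
    weights=w_t, biases=v, labels=labels, inputs=inputs,
    num_sampled=num_samples, num_classes=vsize)
```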
tutorials/rnn/ptb/ptb_word_lm.py
```diff
@@ -110,7 +110,7 @@ class PTBModel(object):
     # different than reported in the paper.
     def lstm_cell():
       return tf.contrib.rnn.BasicLSTMCell(
-          size, forget_bias=0.0, state_is_tuple=True, reuse=tf.get_variable_scope().reuse)
+          size, forget_bias=0.0, state_is_tuple=True)
     attn_cell = lstm_cell
     if is_training and config.keep_prob < 1:
       def attn_cell():
```
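The PTB change drops the `reuse` keyword from tf.contrib.rnn.BasicLSTMCell, which the TensorFlow 1.0 constructor does not accept (the argument appeared in later releases). For code that must span both API versions, one possible guard, sketched here as an assumption rather than anything in this commit, is to check the constructor's signature before passing the argument:

```python
import inspect
import tensorflow as tf

size = 200  # illustrative hidden size (an assumption)

def lstm_cell():
    # Portability guard (a sketch, not code from this commit): only pass
    # `reuse` when the installed BasicLSTMCell constructor accepts it.
    kwargs = dict(forget_bias=0.0, state_is_tuple=True)
    constructor_args = inspect.getargspec(
        tf.contrib.rnn.BasicLSTMCell.__init__).args
    if 'reuse' in constructor_args:
        kwargs['reuse'] = tf.get_variable_scope().reuse
    return tf.contrib.rnn.BasicLSTMCell(size, **kwargs)
```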
tutorials/rnn/translate/seq2seq_model.py
```diff
@@ -100,13 +100,13 @@ class Seq2SeqModel(object):
       b = tf.get_variable("proj_b", [self.target_vocab_size], dtype=dtype)
       output_projection = (w, b)

-      def sampled_loss(labels, inputs):
+      def sampled_loss(labels, logits):
         labels = tf.reshape(labels, [-1, 1])
         # We need to compute the sampled_softmax_loss using 32bit floats to
         # avoid numerical instabilities.
         local_w_t = tf.cast(w_t, tf.float32)
         local_b = tf.cast(b, tf.float32)
-        local_inputs = tf.cast(inputs, tf.float32)
+        local_inputs = tf.cast(logits, tf.float32)
         return tf.cast(
             tf.nn.sampled_softmax_loss(
                 weights=local_w_t,
```
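The translate change renames the loss callback's second parameter from `inputs` to `logits`. Newer TensorFlow releases invoke the `softmax_loss_function` callback with keyword arguments, roughly as f(labels=..., logits=...), so a callback whose second parameter is still named `inputs` fails with an unexpected-keyword error. A self-contained sketch of the corrected callback; all sizes below are assumptions for illustration:

```python
import tensorflow as tf

# Illustrative sizes; assumptions, not values from the repo.
target_vocab_size, hidden_dim, num_samples = 40000, 512, 512

w = tf.get_variable("proj_w", [hidden_dim, target_vocab_size])
w_t = tf.transpose(w)
b = tf.get_variable("proj_b", [target_vocab_size])

def sampled_loss(labels, logits):
    # The seq2seq loss helpers call this as f(labels=..., logits=...), so the
    # parameter must be named `logits` even though it feeds the `inputs` slot
    # of sampled_softmax_loss.
    labels = tf.reshape(labels, [-1, 1])
    # Compute in float32 to avoid numerical instabilities.
    local_w_t = tf.cast(w_t, tf.float32)
    local_b = tf.cast(b, tf.float32)
    local_inputs = tf.cast(logits, tf.float32)
    return tf.nn.sampled_softmax_loss(
        weights=local_w_t, biases=local_b, labels=labels,
        inputs=local_inputs, num_sampled=num_samples,
        num_classes=target_vocab_size)
```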