ModelZoo / ResNet50_tensorflow

Commit b3128357, authored May 09, 2017 by Xin Pan, committed by GitHub on May 09, 2017

Merge pull request #1425 from edouardfouche/patch-1

Update lm_1b_eval.py (Python 3 compatibility)

Parents: 2c44a4b7, b2fc63b3

Showing 1 changed file with 5 additions and 4 deletions.

lm_1b/lm_1b_eval.py (+5, -4)

@@ -17,6 +17,7 @@
 """
 import os
 import sys
+import six
 import numpy as np
 import tensorflow as tf
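
The new import is what the later hunks rely on. As a rough illustration (not part of the commit), six.moves.range resolves to the lazy integer range on either interpreter, i.e. xrange on Python 2 and the built-in range on Python 3:

# Sketch only, assuming six is installed; not part of the patched file.
import six

# Iterates lazily on both Python 2 and Python 3; no intermediate list is built.
squares = [i * i for i in six.moves.range(5)]
assert squares == [0, 1, 4, 9, 16]
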
@@ -83,7 +84,7 @@ def _LoadModel(gd_file, ckpt_file):
   with tf.Graph().as_default():
     sys.stderr.write('Recovering graph.\n')
     with tf.gfile.FastGFile(gd_file, 'r') as f:
-      s = f.read()
+      s = f.read().decode()
       gd = tf.GraphDef()
       text_format.Merge(s, gd)
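
The added .decode() deals with the bytes/str split in Python 3: the text read back from the .pbtxt graph file can arrive as bytes, while text_format.Merge expects text. A minimal sketch of the same pattern, using the plain built-in open rather than tf.gfile and assuming a UTF-8 encoded file and the TF 1.x-era tf.GraphDef name:

# Hedged sketch, not the commit's code path: load a GraphDef from a text
# protobuf, decoding bytes to str so it works under both Python 2 and 3.
from google.protobuf import text_format
import tensorflow as tf

def load_graph_def(pbtxt_path):
    with open(pbtxt_path, 'rb') as f:
        s = f.read().decode()   # bytes -> str (UTF-8 by default)
    gd = tf.GraphDef()          # TF 1.x-era name, as used in this file
    text_format.Merge(s, gd)    # Merge wants text, not raw bytes, on Python 3
    return gd
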
@@ -177,7 +178,7 @@ def _SampleModel(prefix_words, vocab):
   prefix = [vocab.word_to_id(w) for w in prefix_words.split()]
   prefix_char_ids = [vocab.word_to_char_ids(w) for w in prefix_words.split()]
-  for _ in xrange(FLAGS.num_samples):
+  for _ in six.moves.range(FLAGS.num_samples):
     inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
     char_ids_inputs = np.zeros(
         [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
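
The motivation for this hunk (an illustration, not taken from the diff): xrange simply does not exist on Python 3, so the original sampling loop would raise NameError before doing any work, whereas six.moves.range behaves the same on both interpreters.

# Sketch: the failure mode being fixed, and its replacement.
import six

try:
    xrange(3)                    # defined on Python 2 only
except NameError:
    pass                         # on Python 3 the built-in was removed

assert list(six.moves.range(3)) == [0, 1, 2]   # works on either version
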
@@ -230,7 +231,7 @@ def _DumpEmb(vocab):
   sys.stderr.write('Finished softmax weights\n')
   all_embs = np.zeros([vocab.size, 1024])
-  for i in range(vocab.size):
+  for i in six.moves.range(vocab.size):
     input_dict = {t['inputs_in']: inputs,
                   t['targets_in']: targets,
                   t['target_weights_in']: weights}
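
Here range was already valid Python 3, so the switch is presumably about consistency and, on Python 2, memory: range(vocab.size) builds a full list up front, while six.moves.range is lazy on both versions. A toy sketch with a hypothetical vocabulary size (not a figure from the repo):

# Sketch with an assumed vocabulary size; the real size comes from vocab.size.
import six

VOCAB_SIZE = 800000   # hypothetical figure, for illustration only

def iter_word_ids(vocab_size):
    # six.moves.range is lazy on Python 2 (xrange) and Python 3 (range),
    # so no list of vocab_size integers is allocated before the loop runs.
    for i in six.moves.range(vocab_size):
        yield i       # stand-in for the per-word embedding lookup in _DumpEmb

assert next(iter_word_ids(VOCAB_SIZE)) == 0
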
@@ -269,7 +270,7 @@ def _DumpSentenceEmbedding(sentence, vocab):
   inputs = np.zeros([BATCH_SIZE, NUM_TIMESTEPS], np.int32)
   char_ids_inputs = np.zeros(
       [BATCH_SIZE, NUM_TIMESTEPS, vocab.max_word_length], np.int32)
-  for i in xrange(len(word_ids)):
+  for i in six.moves.range(len(word_ids)):
     inputs[0, 0] = word_ids[i]
     char_ids_inputs[0, 0, :] = char_ids[i]