Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
ModelZoo
ResNet50_tensorflow
Commits
f8e854b5
Unverified
Commit
f8e854b5
authored
Feb 02, 2018
by
Sergii Khomenko
Committed by
GitHub
Feb 02, 2018
Browse files
Merge branch 'master' into dataset
parents
52c7c53e
31adae53
Changes
23
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
64 additions
and
62 deletions
+64
-62
research/textsum/README.md
research/textsum/README.md
+1
-1
tutorials/image/cifar10/cifar10.py
tutorials/image/cifar10/cifar10.py
+3
-3
tutorials/image/cifar10/cifar10_input.py
tutorials/image/cifar10/cifar10_input.py
+60
-58
No files found.
research/textsum/README.md
View file @
f8e854b5
...
...
@@ -2,7 +2,7 @@ Sequence-to-Sequence with Attention Model for Text Summarization.
Authors:
Xin Pan
(xpan@google.com, github:panyx0718),
Xin Pan
Peter Liu (peterjliu@google.com, github:peterjliu)
<b>
Introduction
</b>
...
...
tutorials/image/cifar10/cifar10.py
View file @
f8e854b5
...
...
@@ -204,7 +204,7 @@ def inference(images):
kernel
=
_variable_with_weight_decay
(
'weights'
,
shape
=
[
5
,
5
,
3
,
64
],
stddev
=
5e-2
,
wd
=
0.0
)
wd
=
None
)
conv
=
tf
.
nn
.
conv2d
(
images
,
kernel
,
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
biases
=
_variable_on_cpu
(
'biases'
,
[
64
],
tf
.
constant_initializer
(
0.0
))
pre_activation
=
tf
.
nn
.
bias_add
(
conv
,
biases
)
...
...
@@ -223,7 +223,7 @@ def inference(images):
kernel
=
_variable_with_weight_decay
(
'weights'
,
shape
=
[
5
,
5
,
64
,
64
],
stddev
=
5e-2
,
wd
=
0.0
)
wd
=
None
)
conv
=
tf
.
nn
.
conv2d
(
norm1
,
kernel
,
[
1
,
1
,
1
,
1
],
padding
=
'SAME'
)
biases
=
_variable_on_cpu
(
'biases'
,
[
64
],
tf
.
constant_initializer
(
0.1
))
pre_activation
=
tf
.
nn
.
bias_add
(
conv
,
biases
)
...
...
@@ -262,7 +262,7 @@ def inference(images):
# and performs the softmax internally for efficiency.
with
tf
.
variable_scope
(
'softmax_linear'
)
as
scope
:
weights
=
_variable_with_weight_decay
(
'weights'
,
[
192
,
NUM_CLASSES
],
stddev
=
1
/
192.0
,
wd
=
0.0
)
stddev
=
1
/
192.0
,
wd
=
None
)
biases
=
_variable_on_cpu
(
'biases'
,
[
NUM_CLASSES
],
tf
.
constant_initializer
(
0.0
))
softmax_linear
=
tf
.
add
(
tf
.
matmul
(
local4
,
weights
),
biases
,
name
=
scope
.
name
)
...
...
tutorials/image/cifar10/cifar10_input.py
View file @
f8e854b5
...
...
@@ -157,44 +157,45 @@ def distorted_inputs(data_dir, batch_size):
# Create a queue that produces the filenames to read.
filename_queue
=
tf
.
train
.
string_input_producer
(
filenames
)
# Read examples from files in the filename queue.
read_input
=
read_cifar10
(
filename_queue
)
reshaped_image
=
tf
.
cast
(
read_input
.
uint8image
,
tf
.
float32
)
height
=
IMAGE_SIZE
width
=
IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image
=
tf
.
random_crop
(
reshaped_image
,
[
height
,
width
,
3
])
# Randomly flip the image horizontally.
distorted_image
=
tf
.
image
.
random_flip_left_right
(
distorted_image
)
# Because these operations are not commutative, consider randomizing
# the order of their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect; see tensorflow#1458.
distorted_image
=
tf
.
image
.
random_brightness
(
distorted_image
,
max_delta
=
63
)
distorted_image
=
tf
.
image
.
random_contrast
(
distorted_image
,
lower
=
0.2
,
upper
=
1.8
)
# Subtract off the mean and divide by the variance of the pixels.
float_image
=
tf
.
image
.
per_image_standardization
(
distorted_image
)
# Set the shapes of tensors.
float_image
.
set_shape
([
height
,
width
,
3
])
read_input
.
label
.
set_shape
([
1
])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue
=
0.4
min_queue_examples
=
int
(
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
*
min_fraction_of_examples_in_queue
)
print
(
'Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.'
%
min_queue_examples
)
with
tf
.
name_scope
(
'data_augmentation'
):
# Read examples from files in the filename queue.
read_input
=
read_cifar10
(
filename_queue
)
reshaped_image
=
tf
.
cast
(
read_input
.
uint8image
,
tf
.
float32
)
height
=
IMAGE_SIZE
width
=
IMAGE_SIZE
# Image processing for training the network. Note the many random
# distortions applied to the image.
# Randomly crop a [height, width] section of the image.
distorted_image
=
tf
.
random_crop
(
reshaped_image
,
[
height
,
width
,
3
])
# Randomly flip the image horizontally.
distorted_image
=
tf
.
image
.
random_flip_left_right
(
distorted_image
)
# Because these operations are not commutative, consider randomizing
# the order of their operation.
# NOTE: since per_image_standardization zeros the mean and makes
# the stddev unit, this likely has no effect; see tensorflow#1458.
distorted_image
=
tf
.
image
.
random_brightness
(
distorted_image
,
max_delta
=
63
)
distorted_image
=
tf
.
image
.
random_contrast
(
distorted_image
,
lower
=
0.2
,
upper
=
1.8
)
# Subtract off the mean and divide by the variance of the pixels.
float_image
=
tf
.
image
.
per_image_standardization
(
distorted_image
)
# Set the shapes of tensors.
float_image
.
set_shape
([
height
,
width
,
3
])
read_input
.
label
.
set_shape
([
1
])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue
=
0.4
min_queue_examples
=
int
(
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
*
min_fraction_of_examples_in_queue
)
print
(
'Filling queue with %d CIFAR images before starting to train. '
'This will take a few minutes.'
%
min_queue_examples
)
# Generate a batch of images and labels by building up a queue of examples.
return
_generate_image_and_label_batch
(
float_image
,
read_input
.
label
,
...
...
@@ -226,32 +227,33 @@ def inputs(eval_data, data_dir, batch_size):
if
not
tf
.
gfile
.
Exists
(
f
):
raise
ValueError
(
'Failed to find file: '
+
f
)
# Create a queue that produces the filenames to read.
filename_queue
=
tf
.
train
.
string_input_producer
(
filenames
)
with
tf
.
name_scope
(
'input'
):
# Create a queue that produces the filenames to read.
filename_queue
=
tf
.
train
.
string_input_producer
(
filenames
)
# Read examples from files in the filename queue.
read_input
=
read_cifar10
(
filename_queue
)
reshaped_image
=
tf
.
cast
(
read_input
.
uint8image
,
tf
.
float32
)
# Read examples from files in the filename queue.
read_input
=
read_cifar10
(
filename_queue
)
reshaped_image
=
tf
.
cast
(
read_input
.
uint8image
,
tf
.
float32
)
height
=
IMAGE_SIZE
width
=
IMAGE_SIZE
height
=
IMAGE_SIZE
width
=
IMAGE_SIZE
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image
=
tf
.
image
.
resize_image_with_crop_or_pad
(
reshaped_image
,
height
,
width
)
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image
=
tf
.
image
.
resize_image_with_crop_or_pad
(
reshaped_image
,
height
,
width
)
# Subtract off the mean and divide by the variance of the pixels.
float_image
=
tf
.
image
.
per_image_standardization
(
resized_image
)
# Subtract off the mean and divide by the variance of the pixels.
float_image
=
tf
.
image
.
per_image_standardization
(
resized_image
)
# Set the shapes of tensors.
float_image
.
set_shape
([
height
,
width
,
3
])
read_input
.
label
.
set_shape
([
1
])
# Set the shapes of tensors.
float_image
.
set_shape
([
height
,
width
,
3
])
read_input
.
label
.
set_shape
([
1
])
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue
=
0.4
min_queue_examples
=
int
(
num_examples_per_epoch
*
min_fraction_of_examples_in_queue
)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue
=
0.4
min_queue_examples
=
int
(
num_examples_per_epoch
*
min_fraction_of_examples_in_queue
)
# Generate a batch of images and labels by building up a queue of examples.
return
_generate_image_and_label_batch
(
float_image
,
read_input
.
label
,
...
...
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment