ModelZoo / ResNet50_tensorflow · Commits

Commit 3bf85a4e
Authored Aug 26, 2017 by Martin Wicke; committed via GitHub on Aug 26, 2017

Merge pull request #2254 from alanyee/master

Update autoencoders

Parents: da62bb0b, 289a2f99
Changes: showing 4 changed files with 58 additions and 31 deletions (+58, -31).
autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py  (+16, -8)
autoencoder/AutoencoderRunner.py                       (+15, -7)
autoencoder/MaskingNoiseAutoencoderRunner.py           (+16, -9)
autoencoder/VariationalAutoencoderRunner.py            (+11, -7)
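All four runners gain the same three `from __future__` imports. As a quick, hedged illustration of why they matter (this sketch assumes Python 2.7; under Python 3 these imports are no-ops):

# Minimal sketch of what the added __future__ imports change on Python 2.
from __future__ import absolute_import  # bare imports resolve from sys.path, not package-local modules
from __future__ import division         # '/' becomes true division
from __future__ import print_function   # print becomes a function

print(1 / 2)               # 0.5 (classic Python 2 integer division would give 0)
print("a", "b", sep=", ")  # keyword arguments like sep= require print-as-function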
autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py (view file @ 3bf85a4e)

-import numpy as np
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
 import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
 from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder

 mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

 def standard_scale(X_train, X_test):
     preprocessor = prep.StandardScaler().fit(X_train)
...
@@ -14,10 +18,12 @@ def standard_scale(X_train, X_test):
     X_test = preprocessor.transform(X_test)
     return X_train, X_test

+
 def get_random_block_from_data(data, batch_size):
     start_index = np.random.randint(0, len(data) - batch_size)
     return data[start_index:(start_index + batch_size)]

+
 X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
 n_samples = int(mnist.train.num_examples)
...
@@ -25,11 +31,12 @@ training_epochs = 20
 batch_size = 128
 display_step = 1

-autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,
-                                               n_hidden=200,
-                                               transfer_function=tf.nn.softplus,
-                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
-                                               scale=0.01)
+autoencoder = AdditiveGaussianNoiseAutoencoder(
+    n_input=784,
+    n_hidden=200,
+    transfer_function=tf.nn.softplus,
+    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
+    scale=0.01)

 for epoch in range(training_epochs):
     avg_cost = 0.
...
@@ -45,6 +52,7 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
+        print("Epoch:", '%d,' % (epoch + 1),
+              "Cost:", "{:.9f}".format(avg_cost))

 print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
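The only output-level change in this file is the log format. A small, self-contained sketch of the before/after output (the cost value is a placeholder, not from a real run):

# Hypothetical values, illustrating only the format change.
avg_cost = 123.456789012
for epoch in [0, 9]:
    print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))  # old: Epoch: 0001 cost= 123.456789012
    print("Epoch:", '%d,' % (epoch + 1),
          "Cost:", "{:.9f}".format(avg_cost))                                  # new: Epoch: 1, Cost: 123.456789012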
autoencoder/AutoencoderRunner.py (view file @ 3bf85a4e)

-import numpy as np
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
 import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
 from autoencoder_models.Autoencoder import Autoencoder

 mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

 def standard_scale(X_train, X_test):
     preprocessor = prep.StandardScaler().fit(X_train)
...
@@ -14,10 +18,12 @@ def standard_scale(X_train, X_test):
     X_test = preprocessor.transform(X_test)
     return X_train, X_test

+
 def get_random_block_from_data(data, batch_size):
     start_index = np.random.randint(0, len(data) - batch_size)
     return data[start_index:(start_index + batch_size)]

+
 X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
 n_samples = int(mnist.train.num_examples)
...
@@ -25,10 +31,11 @@ training_epochs = 20
 batch_size = 128
 display_step = 1

-autoencoder = Autoencoder(n_input=784,
-                          n_hidden=200,
-                          transfer_function=tf.nn.softplus,
-                          optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
+autoencoder = Autoencoder(
+    n_input=784,
+    n_hidden=200,
+    transfer_function=tf.nn.softplus,
+    optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

 for epoch in range(training_epochs):
     avg_cost = 0.
...
@@ -44,6 +51,7 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
+        print("Epoch:", '%d,' % (epoch + 1),
+              "Cost:", "{:.9f}".format(avg_cost))

 print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
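The hunks above elide the inner training loop. A minimal sketch of its likely shape, reconstructed from the context lines (get_random_block_from_data and the avg_cost update are taken from the diff; the call to the model is replaced by a placeholder so the snippet runs without TensorFlow):

import numpy as np

def get_random_block_from_data(data, batch_size):
    # Random contiguous mini-batch, exactly as in the diff context above.
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]

X_train = np.random.rand(1000, 784)   # stand-in for the scaled MNIST images
n_samples = len(X_train)
training_epochs, batch_size, display_step = 3, 128, 1

for epoch in range(training_epochs):
    avg_cost = 0.
    for _ in range(int(n_samples / batch_size)):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = float(np.sum(batch_xs ** 2))  # placeholder for autoencoder.partial_fit(batch_xs)
        avg_cost += cost / n_samples * batch_size
    if epoch % display_step == 0:
        print("Epoch:", '%d,' % (epoch + 1),
              "Cost:", "{:.9f}".format(avg_cost))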
autoencoder/MaskingNoiseAutoencoderRunner.py (view file @ 3bf85a4e)

-import numpy as np
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
 import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
 from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder

 mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

 def standard_scale(X_train, X_test):
     preprocessor = prep.StandardScaler().fit(X_train)
...
@@ -14,23 +18,25 @@ def standard_scale(X_train, X_test):
     X_test = preprocessor.transform(X_test)
     return X_train, X_test

+
 def get_random_block_from_data(data, batch_size):
     start_index = np.random.randint(0, len(data) - batch_size)
     return data[start_index:(start_index + batch_size)]

+
 X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
 n_samples = int(mnist.train.num_examples)
 training_epochs = 100
 batch_size = 128
 display_step = 1

-autoencoder = MaskingNoiseAutoencoder(n_input=784,
-                                      n_hidden=200,
-                                      transfer_function=tf.nn.softplus,
-                                      optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
-                                      dropout_probability=0.95)
+autoencoder = MaskingNoiseAutoencoder(
+    n_input=784,
+    n_hidden=200,
+    transfer_function=tf.nn.softplus,
+    optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
+    dropout_probability=0.95)

 for epoch in range(training_epochs):
     avg_cost = 0.
...
@@ -43,6 +49,7 @@ for epoch in range(training_epochs):
         avg_cost += cost / n_samples * batch_size

     if epoch % display_step == 0:
-        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
+        print("Epoch:", '%d,' % (epoch + 1),
+              "Cost:", "{:.9f}".format(avg_cost))

 print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
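This runner's distinguishing parameter is dropout_probability=0.95, i.e. dropout-style masking of the input. A NumPy-only sketch of the corruption idea (an illustration of intent, not the repository's implementation):

# Keep each input element with probability keep_prob, zero it otherwise,
# and rescale survivors by 1/keep_prob the way tf.nn.dropout does.
import numpy as np

def masking_noise(x, keep_prob=0.95):
    mask = np.random.binomial(1, keep_prob, size=x.shape)
    return x * mask / keep_prob

x = np.ones((2, 8))
print(masking_noise(x))  # most entries 1/0.95 ≈ 1.053, a few zeroed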
autoencoder/VariationalAutoencoderRunner.py (view file @ 3bf85a4e)

-import numpy as np
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
 import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
 from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder

 mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

 def min_max_scale(X_train, X_test):
...
@@ -29,9 +31,10 @@ training_epochs = 20
 batch_size = 128
 display_step = 1

-autoencoder = VariationalAutoencoder(n_input=784,
-                                     n_hidden=200,
-                                     optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
+autoencoder = VariationalAutoencoder(
+    n_input=784,
+    n_hidden=200,
+    optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

 for epoch in range(training_epochs):
     avg_cost = 0.
...
@@ -47,6 +50,7 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
+        print("Epoch:", '%d,' % (epoch + 1),
+              "Cost:", "{:.9f}".format(avg_cost))

 print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
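Unlike the other runners, this file scales inputs with min_max_scale, whose body is elided in the diff above. A plausible body following the standard_scale pattern seen in the other files (a hypothetical reconstruction; the actual file may fit the scaler differently):

import numpy as np
import sklearn.preprocessing as prep

def min_max_scale(X_train, X_test):
    # Fit a [0, 1] scaler on the training images and apply it to both splits.
    preprocessor = prep.MinMaxScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test

Xtr, Xte = min_max_scale(np.random.rand(10, 4) * 255, np.random.rand(5, 4) * 255)
print(Xtr.min(), Xtr.max())  # 0.0 1.0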