ModelZoo / ResNet50_tensorflow

Commit 70702f79, authored Mar 17, 2016 by Jiří Vahala

Autoencoders

parent 574c981c
Showing 12 changed files with 480 additions and 0 deletions
.gitignore                                                  +2   -0
README                                                      +1   -0
autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py       +51  -0
autoencoder/AutoencoderRunner.py                            +50  -0
autoencoder/MaskingNoiseAutoencoderRunner.py                +49  -0
autoencoder/Utils.py                                        +9   -0
autoencoder/VariationalAutoencoderRunner.py                 +53  -0
autoencoder/__init__.py                                     +0   -0
autoencoder/autoencoder_models/Autoencoder.py               +60  -0
autoencoder/autoencoder_models/DenoisingAutoencoder.py      +130 -0
autoencoder/autoencoder_models/VariationalAutoencoder.py    +75  -0
autoencoder/autoencoder_models/__init__.py                  +0   -0
.gitignore
0 → 100644
autoencoder/MNIST_data/*
*.pyc
README
0 → 100644
Very simple implementations of some autoencoder variations
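Each model class under autoencoder/autoencoder_models is paired with a runner script that trains it on MNIST. A hypothetical quick start, assuming the scripts are launched from the repository root so the autoencoder package is importable (these commands are illustrative, not part of the commit):

    python autoencoder/AutoencoderRunner.py
    python autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py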
autoencoder/AdditiveGaussianNoiseAutoencoderRunner.py
0 → 100644
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=784,
                                               n_hidden=200,
                                               transfer_function=tf.nn.softplus,
                                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                               scale=0.01)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
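A note on the accumulator above: partial_fit returns the summed squared-error cost of one batch, and each term cost / n_samples * batch_size weights that batch by its share of the training set. With the TF tutorial split of 55,000 training images and batch_size = 128, total_batch = int(55000 / 128) = 429, so avg_cost ends the epoch at roughly (1/429) * (sum of the per-batch costs), i.e. the mean cost per batch.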
autoencoder/AutoencoderRunner.py
0 → 100644
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.Autoencoder import Autoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = Autoencoder(n_input=784,
                          n_hidden=200,
                          transfer_function=tf.nn.softplus,
                          optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
autoencoder/MaskingNoiseAutoencoderRunner.py
0 → 100644
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1

autoencoder = MaskingNoiseAutoencoder(n_input=784,
                                      n_hidden=200,
                                      transfer_function=tf.nn.softplus,
                                      optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                      dropout_probability=0.95)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        cost = autoencoder.partial_fit(batch_xs)

        avg_cost += cost / n_samples * batch_size

    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
autoencoder/Utils.py
0 → 100644
import numpy as np
import tensorflow as tf


def xavier_init(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32)
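xavier_init draws weights uniformly from [-b, b] with b = sqrt(6 / (fan_in + fan_out)), the Glorot/Bengio heuristic for keeping activation variance roughly constant across layers, since Var(U(-b, b)) = b^2 / 3 = 2 / (fan_in + fan_out). A quick numeric check for the encoder weights used throughout this commit, fan_in = 784 and fan_out = 200 (illustrative, not part of the commit):

    import numpy as np

    bound = np.sqrt(6.0 / (784 + 200))
    print bound           # ~0.0781, the uniform limit for w1
    print bound ** 2 / 3  # ~0.00203 == 2.0 / (784 + 200), the target variance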
autoencoder/VariationalAutoencoderRunner.py
0 → 100644
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


def standard_scale(X_train, X_test):
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1

autoencoder = VariationalAutoencoder(n_input=784,
                                     n_hidden=200,
                                     optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
                                     gaussian_sample_size=128)

for epoch in range(training_epochs):
    avg_cost = 0.
    total_batch = int(n_samples / batch_size)
    # Loop over all batches
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)

        # Fit training using batch data
        cost = autoencoder.partial_fit(batch_xs)
        # Compute average loss
        avg_cost += cost / n_samples * batch_size

    # Display logs per epoch step
    if epoch % display_step == 0:
        print "Epoch:", '%04d' % (epoch + 1), \
            "cost=", "{:.9f}".format(avg_cost)

print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
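One caveat in this runner: VariationalAutoencoder draws its noise tensor eps with the fixed shape (gaussian_sample_size, n_hidden), so every batch fed to the model must contain exactly gaussian_sample_size = 128 examples. The training loop satisfies this because batch_size = 128, but the final calc_total_cost(X_test) call passes the full 10,000-image test set, whose shape will not broadcast against eps; evaluating the test set in 128-example chunks would avoid this.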
autoencoder/__init__.py
0 → 100644
autoencoder/autoencoder_models/Autoencoder.py
0 → 100644
import tensorflow as tf
import numpy as np
import autoencoder.Utils


class Autoencoder(object):

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']),
                                           self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                     self.weights['b2'])

        # cost: half the summed squared reconstruction error
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        # Encoder weights use Xavier init; decoder weights start at zero.
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X})

    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X})

    def generate(self, hidden=None):
        if hidden is None:
            # Fixed: np.random.normal needs a shape as size, not the tf.Variable
            # self.weights["b1"] itself.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
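A hypothetical round-trip sketch for this class (not part of the commit), assuming the same Python 2 / TensorFlow 0.x environment as the rest of the code:

    import numpy as np
    from autoencoder.autoencoder_models.Autoencoder import Autoencoder

    ae = Autoencoder(n_input=784, n_hidden=200)
    X = np.random.rand(10, 784).astype(np.float32)  # stand-in batch of 10 inputs
    codes = ae.transform(X)                         # (10, 200) hidden activations
    recon = ae.reconstruct(X)                       # (10, 784) reconstructions
    print codes.shape, recon.shape
    print ae.calc_total_cost(X)                     # summed squared error on X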
autoencoder/autoencoder_models/DenoisingAutoencoder.py
0 → 100644
import tensorflow as tf
import numpy as np
import autoencoder.Utils


class AdditiveGaussianNoiseAutoencoder(object):

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)
        self.training_scale = scale
        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        # Fixed: corrupt the input through the self.scale placeholder. The
        # original used the Python float `scale` here, so the value passed via
        # feed_dict in the methods below had no effect on the graph.
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(
            tf.matmul(self.x + self.scale * tf.random_normal((n_input,)),
                      self.weights['w1']),
            self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                     self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        return self.sess.run(self.hidden,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        if hidden is None:
            # Fixed: np.random.normal needs a shape as size, not the tf.Variable
            # self.weights["b1"] itself.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])


class MaskingNoiseAutoencoder(object):

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), dropout_probability=0.95):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.dropout_probability = dropout_probability
        self.keep_prob = tf.placeholder(tf.float32)

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model: masking noise is dropout applied to the input layer
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(
            tf.matmul(tf.nn.dropout(self.x, self.keep_prob), self.weights['w1']),
            self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']),
                                     self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        # Train on corrupted inputs; evaluation methods below use keep_prob=1.0.
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.keep_prob: self.dropout_probability})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X, self.keep_prob: 1.0})

    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.keep_prob: 1.0})

    def generate(self, hidden=None):
        if hidden is None:
            # Fixed: np.random.normal needs a shape as size, not the tf.Variable
            # self.weights["b1"] itself.
            hidden = np.random.normal(size=(1, self.n_hidden))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.keep_prob: 1.0})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
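The two classes above differ only in how the input is corrupted before encoding: additive Gaussian noise of magnitude scale, versus dropout-style masking that zeroes roughly (1 - keep_prob) of the input entries and rescales the survivors by 1 / keep_prob. A plain-numpy restatement of the masking corruption (illustrative, not part of the commit; tf.nn.dropout performs the equivalent operation inside the graph):

    import numpy as np

    X = np.random.rand(4, 784).astype(np.float32)
    keep_prob = 0.95
    mask = np.random.rand(*X.shape) < keep_prob
    X_corrupted = X * mask / keep_prob    # zero ~5% of entries, rescale the rest
    print (X_corrupted == 0).mean()       # close to 0.05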
autoencoder/autoencoder_models/VariationalAutoencoder.py
0 → 100644
import tensorflow as tf
import numpy as np
import autoencoder.Utils


class VariationalAutoencoder(object):

    def __init__(self, n_input, n_hidden, optimizer=tf.train.AdamOptimizer(),
                 gaussian_sample_size=128):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.gaussian_sample_size = gaussian_sample_size

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']),
                             self.weights['b1'])
        self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']),
                                     self.weights['log_sigma_b1'])

        # sample from gaussian distribution
        # Note: eps has a fixed first dimension, so every batch fed to this
        # model must contain exactly gaussian_sample_size examples.
        eps = tf.random_normal((self.gaussian_sample_size, n_hidden), 0, 1,
                               dtype=tf.float32)
        self.z = tf.add(self.z_mean, tf.mul(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']),
                                     self.weights['b2'])

        # cost: squared reconstruction error plus the KL term
        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.sub(self.reconstruction, self.x), 2.0))
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.initialize_all_variables()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['log_sigma_w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X})

    def transform(self, X):
        return self.sess.run(self.z_mean, feed_dict={self.x: X})

    def generate(self, hidden=None):
        if hidden is None:
            # Fixed: np.random.normal needs a shape as size, not the tf.Variable
            # self.weights["b1"] itself.
            hidden = np.random.normal(size=(self.gaussian_sample_size, self.n_hidden))
        # Fixed: feed the sampled code z directly. Feeding z_mean alone still
        # leaves z depending on z_log_sigma_sq, and therefore on the unfed x.
        return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
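The sampling line above is the reparameterization trick: rather than sampling z ~ N(z_mean, sigma^2) directly, the model samples eps ~ N(0, 1) and computes z = z_mean + sqrt(exp(z_log_sigma_sq)) * eps, which keeps z differentiable with respect to both encoder outputs. The latent_loss term is the closed-form KL divergence between N(mu, sigma^2) and N(0, 1), namely -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2) over the hidden units. A minimal numpy restatement of the sampling step (illustrative, not part of the commit):

    import numpy as np

    z_mean = np.zeros(200)                  # encoder mean for one example
    z_log_sigma_sq = np.zeros(200)          # log sigma^2 = 0, i.e. sigma = 1
    eps = np.random.normal(0, 1, size=200)
    z = z_mean + np.sqrt(np.exp(z_log_sigma_sq)) * eps  # reduces to z = eps here
    print z[:3]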
autoencoder/autoencoder_models/__init__.py
0 → 100644