Commit aef35824 authored by Joshua Howard, committed by Neal Wu

Removed external dependencies from autoencoder models

parent 79d2ecb1
@@ -4,7 +4,7 @@ import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
-from autoencoder.autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder
+from autoencoder_models.DenoisingAutoencoder import AdditiveGaussianNoiseAutoencoder
 mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
@@ -45,7 +45,6 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print "Epoch:", '%04d' % (epoch + 1), \
-            "cost=", "{:.9f}".format(avg_cost)
+        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
-print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
+print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
@@ -4,7 +4,7 @@ import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
-from autoencoder.autoencoder_models.Autoencoder import Autoencoder
+from autoencoder_models.Autoencoder import Autoencoder
 mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
@@ -44,7 +44,6 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print "Epoch:", '%04d' % (epoch + 1), \
-            "cost=", "{:.9f}".format(avg_cost)
+        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
-print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
+print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
@@ -4,7 +4,7 @@ import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
-from autoencoder.autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
+from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
 mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
@@ -43,7 +43,6 @@ for epoch in range(training_epochs):
         avg_cost += cost / n_samples * batch_size
     if epoch % display_step == 0:
-        print "Epoch:", '%04d' % (epoch + 1), \
-            "cost=", "{:.9f}".format(avg_cost)
+        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
-print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
+print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
-import numpy as np
-import tensorflow as tf
-
-def xavier_init(fan_in, fan_out, constant = 1):
-    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
-    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
-    return tf.random_uniform((fan_in, fan_out),
-                             minval = low, maxval = high,
-                             dtype = tf.float32)
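This deleted helper, from the module the models imported as autoencoder.Utils, samples uniformly from ±sqrt(6 / (fan_in + fan_out)), the Glorot/Xavier range chosen to keep activation variance roughly constant across layers. The replacement used throughout the commit, tf.contrib.layers.xavier_initializer(), defaults to the same uniform scheme in TF 1.x, which is why the helper can go. A sketch of the correspondence; the variable name "w_demo" and the 784x200 shape are for illustration only:

import numpy as np
import tensorflow as tf

fan_in, fan_out = 784, 200
bound = np.sqrt(6.0 / (fan_in + fan_out))

# Old style: explicit uniform sample over [-bound, bound].
w_old = tf.Variable(tf.random_uniform((fan_in, fan_out), minval=-bound, maxval=bound))

# New style: the contrib initializer defaults to the same Glorot-uniform range.
w_new = tf.get_variable("w_demo", shape=[fan_in, fan_out],
                        initializer=tf.contrib.layers.xavier_initializer())

One caveat of the new form: tf.get_variable with a fixed name like "w1" raises a reuse error if two model instances are built in the same default graph, unless each instance is wrapped in its own tf.variable_scope.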
@@ -4,7 +4,7 @@ import sklearn.preprocessing as prep
 import tensorflow as tf
 from tensorflow.examples.tutorials.mnist import input_data
-from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
+from autoencoder_models.VariationalAutoencoder import VariationalAutoencoder
 mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
@@ -47,7 +47,6 @@ for epoch in range(training_epochs):
     # Display logs per epoch step
     if epoch % display_step == 0:
-        print "Epoch:", '%04d' % (epoch + 1), \
-            "cost=", "{:.9f}".format(avg_cost)
+        print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
-print "Total cost: " + str(autoencoder.calc_total_cost(X_test))
+print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
 import tensorflow as tf
 import numpy as np
-import autoencoder.Utils

 class Autoencoder(object):
@@ -28,7 +26,8 @@ class Autoencoder(object):
     def _initialize_weights(self):
         all_weights = dict()
-        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
+        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
         all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
         all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
         all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
@@ -46,7 +45,7 @@ class Autoencoder(object):
     def generate(self, hidden = None):
         if hidden is None:
-            hidden = np.random.normal(size=self.weights["b1"])
+            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
         return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

     def reconstruct(self, X):
......
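The generate fix above repairs a genuine bug: np.random.normal expects an int or tuple of ints for size, and self.weights["b1"] is a tf.Variable, so the old line could not run. The committed version samples through the session instead. A NumPy alternative sketch, assuming the same attributes, that avoids adding a fresh tf.random_normal op to the graph on every call:

import numpy as np

def generate(self, hidden=None):
    if hidden is None:
        # Sample the latent code host-side; shape [1, n_hidden] matches the hidden layer.
        hidden = np.random.normal(size=(1, self.n_hidden))
    return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})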
 import tensorflow as tf
 import numpy as np
-import autoencoder.Utils

 class AdditiveGaussianNoiseAutoencoder(object):
     def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus, optimizer = tf.train.AdamOptimizer(),
@@ -31,7 +28,8 @@ class AdditiveGaussianNoiseAutoencoder(object):
     def _initialize_weights(self):
         all_weights = dict()
-        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
+        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
         all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
         all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
         all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
@@ -53,9 +51,9 @@ class AdditiveGaussianNoiseAutoencoder(object):
                                               self.scale: self.training_scale
                                               })

-    def generate(self, hidden = None):
+    def generate(self, hidden=None):
         if hidden is None:
-            hidden = np.random.normal(size = self.weights["b1"])
+            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
         return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})

     def reconstruct(self, X):
@@ -98,7 +96,8 @@ class MaskingNoiseAutoencoder(object):
     def _initialize_weights(self):
         all_weights = dict()
-        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
+        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
         all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
         all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
         all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
@@ -115,9 +114,9 @@ class MaskingNoiseAutoencoder(object):
     def transform(self, X):
         return self.sess.run(self.hidden, feed_dict = {self.x: X, self.keep_prob: 1.0})

-    def generate(self, hidden = None):
+    def generate(self, hidden=None):
         if hidden is None:
-            hidden = np.random.normal(size = self.weights["b1"])
+            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
         return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})

     def reconstruct(self, X):
......
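The two classes in this file corrupt their input differently: the additive model perturbs x with scaled Gaussian noise (the self.scale / self.training_scale values in the context lines), while the masking model zeroes a random subset of inputs, dropout-style, governed by self.keep_prob. A rough NumPy sketch of the two corruptions, with made-up batch data; the rescaling by 1/keep_prob mirrors how tf.nn.dropout treats kept values:

import numpy as np

x = np.random.rand(4, 784).astype(np.float32)  # hypothetical batch

# Additive Gaussian corruption: input plus scaled standard-normal noise.
scale = 0.1
x_additive = x + scale * np.random.normal(size=x.shape)

# Masking corruption: keep each entry with probability keep_prob, zero the rest.
keep_prob = 0.95
mask = np.random.binomial(1, keep_prob, size=x.shape).astype(np.float32)
x_masked = x * mask / keep_prob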
 import tensorflow as tf
 import numpy as np
-import autoencoder.Utils

 class VariationalAutoencoder(object):
@@ -36,8 +35,10 @@ class VariationalAutoencoder(object):
     def _initialize_weights(self):
         all_weights = dict()
-        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
-        all_weights['log_sigma_w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
+        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
+        all_weights['log_sigma_w1'] = tf.get_variable("log_sigma_w1", shape=[self.n_input, self.n_hidden],
+            initializer=tf.contrib.layers.xavier_initializer())
         all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
         all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
         all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
......
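The paired w1 / log_sigma_w1 weights exist because a variational encoder emits both a mean and a log-variance for the latent code, which are then combined via the reparameterization trick so gradients can flow through the sampling step. A minimal sketch of that step; the zero tensors stand in for the encoder outputs and are not taken from this file:

import tensorflow as tf

n_hidden = 200
z_mean = tf.zeros([1, n_hidden])          # stand-in for the encoder's mean output
z_log_sigma_sq = tf.zeros([1, n_hidden])  # stand-in for the encoder's log-variance output

# Reparameterization: z = mu + sigma * eps with eps ~ N(0, 1), so the
# randomness is independent of the learned parameters.
eps = tf.random_normal([1, n_hidden])
z = z_mean + tf.sqrt(tf.exp(z_log_sigma_sq)) * eps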