Commit a2f1732d authored by Swaroop Guntupalli, committed by Karmel Allison

ENH: Updated autoencoder to support arbitrary depth (#1052)

* ENH: Modified Autoencoder to accept multilayer (stacked) architectures

* Cleanup of commented-out code.
parent 39326fb6
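Note on the new interface (editor's illustration, not part of the diff): the constructor now takes a list of layer sizes rather than a single n_input/n_hidden pair, so depth is controlled by the length of n_layers. A minimal sketch; the [784, 400, 200] sizes and the import path are assumptions:

import tensorflow as tf
from autoencoder_models.Autoencoder import Autoencoder  # import path is an assumption

# Each adjacent pair in n_layers becomes one encoder layer (plus a mirrored
# decoder layer): 784 -> 400 -> 200 on the way in, 200 -> 400 -> 784 out.
deep_autoencoder = Autoencoder(n_layers=[784, 400, 200],
                               transfer_function=tf.nn.softplus,
                               optimizer=tf.train.AdamOptimizer(learning_rate=0.001))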
@@ -31,11 +31,9 @@ training_epochs = 20
 batch_size = 128
 display_step = 1

-autoencoder = Autoencoder(
-    n_input=784,
-    n_hidden=200,
-    transfer_function=tf.nn.softplus,
-    optimizer=tf.train.AdamOptimizer(learning_rate=0.001))
+autoencoder = Autoencoder(n_layers=[784, 200],
+                          transfer_function=tf.nn.softplus,
+                          optimizer=tf.train.AdamOptimizer(learning_rate=0.001))

 for epoch in range(training_epochs):
     avg_cost = 0.
@@ -2,18 +2,30 @@ import tensorflow as tf
+import numpy as np

 class Autoencoder(object):

-    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer = tf.train.AdamOptimizer()):
-        self.n_input = n_input
-        self.n_hidden = n_hidden
+    def __init__(self, n_layers, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
+        self.n_layers = n_layers
         self.transfer = transfer_function

         network_weights = self._initialize_weights()
         self.weights = network_weights

         # model
-        self.x = tf.placeholder(tf.float32, [None, self.n_input])
-        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
-        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
+        self.x = tf.placeholder(tf.float32, [None, self.n_layers[0]])
+
+        self.hidden_encode = []
+        h = self.x
+        for layer in range(len(self.n_layers) - 1):
+            h = self.transfer(
+                tf.add(tf.matmul(h, self.weights['encode'][layer]['w']),
+                       self.weights['encode'][layer]['b']))
+            self.hidden_encode.append(h)
+
+        self.hidden_recon = []
+        for layer in range(len(self.n_layers) - 1):
+            h = self.transfer(
+                tf.add(tf.matmul(h, self.weights['recon'][layer]['w']),
+                       self.weights['recon'][layer]['b']))
+            self.hidden_recon.append(h)
+
+        self.reconstruction = self.hidden_recon[-1]

         # cost
         self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
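Reviewer note on the hunk above: the decoder loop picks up h where the encoder loop left it, so data flows n_layers[0] -> ... -> n_layers[-1] and back out to the input width; unlike the old two-layer version, the transfer function is now applied to the final reconstruction layer as well. A pure-NumPy sketch of the same shape flow (illustrative only; softplus stands in for transfer_function, zeros for the learned variables):

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

n_layers = [784, 400, 200]
h = np.random.rand(1, n_layers[0])

# Encoder: one (w, b) per adjacent pair, left to right: 784 -> 400 -> 200.
for fan_in, fan_out in zip(n_layers[:-1], n_layers[1:]):
    w = np.zeros((fan_in, fan_out))
    b = np.zeros(fan_out)
    h = softplus(h @ w + b)

# Decoder: the mirrored pairs, right to left: 200 -> 400 -> 784.
for fan_in, fan_out in zip(n_layers[:0:-1], n_layers[-2::-1]):
    w = np.zeros((fan_in, fan_out))
    b = np.zeros(fan_out)
    h = softplus(h @ w + b)

assert h.shape == (1, n_layers[0])  # reconstruction is back at the input width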
@@ -26,11 +38,24 @@ class Autoencoder(object):

     def _initialize_weights(self):
         all_weights = dict()
-        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
-            initializer=tf.contrib.layers.xavier_initializer())
-        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
-        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
-        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
+        # Encoding network weights
+        encoder_weights = []
+        for layer in range(len(self.n_layers) - 1):
+            w = tf.Variable(
+                autoencoder.Utils.xavier_init(self.n_layers[layer],
+                                              self.n_layers[layer + 1]))
+            b = tf.Variable(tf.zeros([self.n_layers[layer + 1]], dtype=tf.float32))
+            encoder_weights.append({'w': w, 'b': b})
+        # Recon network weights
+        recon_weights = []
+        for layer in range(len(self.n_layers) - 1, 0, -1):
+            w = tf.Variable(
+                autoencoder.Utils.xavier_init(self.n_layers[layer],
+                                              self.n_layers[layer - 1]))
+            b = tf.Variable(tf.zeros([self.n_layers[layer - 1]], dtype=tf.float32))
+            recon_weights.append({'w': w, 'b': b})
+        all_weights['encode'] = encoder_weights
+        all_weights['recon'] = recon_weights
         return all_weights

     def partial_fit(self, X):
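The weight loops call autoencoder.Utils.xavier_init, a repo helper whose definition and import are outside this diff. For reference, a sketch of a Glorot/Xavier uniform initializer with that two-argument shape; the constant keyword and exact bound are assumptions based on the standard Glorot formulation:

import math
import tensorflow as tf

def xavier_init(fan_in, fan_out, constant=1.0):
    # Glorot & Bengio (2010): uniform samples in [-limit, limit],
    # with limit = constant * sqrt(6 / (fan_in + fan_out)).
    limit = constant * math.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-limit, maxval=limit,
                             dtype=tf.float32)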
@@ -38,22 +63,24 @@ class Autoencoder(object):
         return cost

     def calc_total_cost(self, X):
-        return self.sess.run(self.cost, feed_dict = {self.x: X})
+        return self.sess.run(self.cost, feed_dict={self.x: X})

     def transform(self, X):
-        return self.sess.run(self.hidden, feed_dict={self.x: X})
+        return self.sess.run(self.hidden_encode[-1], feed_dict={self.x: X})

-    def generate(self, hidden = None):
+    def generate(self, hidden=None):
         if hidden is None:
-            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
-        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})
+            # Draw a random code with the bottleneck width, shape (1, n_layers[-1]).
+            hidden = np.random.normal(size=(1, self.n_layers[-1]))
+        return self.sess.run(self.reconstruction, feed_dict={self.hidden_encode[-1]: hidden})

     def reconstruct(self, X):
         return self.sess.run(self.reconstruction, feed_dict={self.x: X})

     def getWeights(self):
-        return self.sess.run(self.weights['w1'])
+        raise NotImplementedError

     def getBiases(self):
-        return self.sess.run(self.weights['b1'])
+        raise NotImplementedError
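End-to-end usage, for context (editor's sketch: the random X_train stands in for the MNIST array the runner script actually loads; autoencoder and batch_size are the objects from the first hunk, so n_layers=[784, 200]):

import numpy as np

X_train = np.random.rand(512, 784).astype(np.float32)  # stand-in for MNIST

for start in range(0, len(X_train), batch_size):
    batch = X_train[start:start + batch_size]
    cost = autoencoder.partial_fit(batch)     # one optimizer step; returns the batch cost

codes = autoencoder.transform(X_train)        # deepest encoder activations, shape (512, 200)
recons = autoencoder.reconstruct(X_train)     # reconstructions, shape (512, 784)
sample = autoencoder.generate()               # decode one random bottleneck code, shape (1, 784)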