Stacked_AutoEncoder.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 2 14:42:38 2018
@author: Tathagat Dasgupta
"""
import os
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Download/cache MNIST under the user's home directory (TF 1.x tutorial reader)
mnist_data_dir = os.path.join(Path.home(),"MNIST_data/")
mnist=input_data.read_data_sets(mnist_data_dir,one_hot=True)
tf.reset_default_graph()

# Symmetric 784-392-196-392-784 stacked autoencoder
num_inputs=784 #28x28 pixels
num_hid1=392
num_hid2=196
num_hid3=num_hid1
num_output=num_inputs
lr=0.01 #learning rate
actf=tf.nn.relu #activation function

X=tf.placeholder(tf.float32,shape=[None,num_inputs])
initializer=tf.variance_scaling_initializer() #He-style weight initialization
# Weights and biases for the encoder (w1, w2) and decoder (w3, w4)
w1=tf.Variable(initializer([num_inputs,num_hid1]),dtype=tf.float32)
w2=tf.Variable(initializer([num_hid1,num_hid2]),dtype=tf.float32)
w3=tf.Variable(initializer([num_hid2,num_hid3]),dtype=tf.float32)
w4=tf.Variable(initializer([num_hid3,num_output]),dtype=tf.float32)
b1=tf.Variable(tf.zeros(num_hid1))
b2=tf.Variable(tf.zeros(num_hid2))
b3=tf.Variable(tf.zeros(num_hid3))
b4=tf.Variable(tf.zeros(num_output))

# Forward pass: three hidden layers plus the reconstruction layer
hid_layer1=actf(tf.matmul(X,w1)+b1)
hid_layer2=actf(tf.matmul(hid_layer1,w2)+b2)
hid_layer3=actf(tf.matmul(hid_layer2,w3)+b3)
output_layer=actf(tf.matmul(hid_layer3,w4)+b4)
# Reconstruction loss: mean squared error between output and input
loss=tf.reduce_mean(tf.square(output_layer-X))
optimizer=tf.train.AdamOptimizer(lr)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()

num_epoch=5
batch_size=150
num_test_images=10
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(num_epoch):
        num_batches=mnist.train.num_examples//batch_size
        for iteration in range(num_batches):
            # Unsupervised training: the labels (y_batch) are not used
            X_batch,y_batch=mnist.train.next_batch(batch_size)
            sess.run(train,feed_dict={X:X_batch})
        train_loss=loss.eval(feed_dict={X:X_batch})
        print("epoch {} loss {}".format(epoch,train_loss))
    # Comparing original images (top row) with reconstructions (bottom row)
    results=output_layer.eval(feed_dict={X:mnist.test.images[:num_test_images]})
    f,a=plt.subplots(2,10,figsize=(20,4))
    for i in range(num_test_images):
        a[0][i].imshow(np.reshape(mnist.test.images[i],(28,28)))
        a[1][i].imshow(np.reshape(results[i],(28,28)))
    plt.show()
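
Note: the script above targets the TensorFlow 1.x graph API (tf.placeholder, tf.Session, tensorflow.examples.tutorials.mnist), which no longer exists in TensorFlow 2.x. The following is a minimal sketch, not part of the original script, of an equivalent stacked autoencoder written with tf.keras, assuming TensorFlow 2.x is installed; the layer sizes (784-392-196-392-784), ReLU activations, learning rate, epoch count, and batch size mirror the values used above.

import tensorflow as tf

# Load MNIST via tf.keras.datasets (the tutorials reader was removed in TF 2.x)
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# Same symmetric 784-392-196-392-784 architecture with ReLU activations
model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(784,)),
    tf.keras.layers.Dense(392, activation="relu"),
    tf.keras.layers.Dense(196, activation="relu"),
    tf.keras.layers.Dense(392, activation="relu"),
    tf.keras.layers.Dense(784, activation="relu"),
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), loss="mse")

# Unsupervised reconstruction: the input is also the target
model.fit(x_train, x_train, epochs=5, batch_size=150)
reconstructions = model.predict(x_test[:10])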