vae.py
import tensorflow as tf


# Gaussian MLP as encoder
def gaussian_MLP_encoder(x, n_hidden, n_output, keep_prob):
    with tf.variable_scope("gaussian_MLP_encoder"):
        # initializers
        w_init = tf.contrib.layers.variance_scaling_initializer()
        b_init = tf.constant_initializer(0.)

        # 1st hidden layer
        w0 = tf.get_variable('w0', [x.get_shape()[1], n_hidden], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
        h0 = tf.matmul(x, w0) + b0
        h0 = tf.nn.elu(h0)
        h0 = tf.nn.dropout(h0, keep_prob)

        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.tanh(h1)
        h1 = tf.nn.dropout(h1, keep_prob)

        # output layer
        # borrowed from https://github.com/altosaar/vae/blob/master/vae.py
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output * 2], initializer=w_init)
        bo = tf.get_variable('bo', [n_output * 2], initializer=b_init)
        gaussian_params = tf.matmul(h1, wo) + bo

        # The mean parameter is unconstrained
        mean = gaussian_params[:, :n_output]
        # The standard deviation must be positive. Parametrize with a softplus and
        # add a small epsilon for numerical stability
        stddev = 1e-6 + tf.nn.softplus(gaussian_params[:, n_output:])

    return mean, stddev


# Bernoulli MLP as decoder
def bernoulli_MLP_decoder(z, n_hidden, n_output, keep_prob, reuse=False):
    with tf.variable_scope("bernoulli_MLP_decoder", reuse=reuse):
        # initializers
        w_init = tf.contrib.layers.variance_scaling_initializer()
        b_init = tf.constant_initializer(0.)

        # 1st hidden layer
        w0 = tf.get_variable('w0', [z.get_shape()[1], n_hidden], initializer=w_init)
        b0 = tf.get_variable('b0', [n_hidden], initializer=b_init)
        h0 = tf.matmul(z, w0) + b0
        h0 = tf.nn.tanh(h0)
        h0 = tf.nn.dropout(h0, keep_prob)

        # 2nd hidden layer
        w1 = tf.get_variable('w1', [h0.get_shape()[1], n_hidden], initializer=w_init)
        b1 = tf.get_variable('b1', [n_hidden], initializer=b_init)
        h1 = tf.matmul(h0, w1) + b1
        h1 = tf.nn.elu(h1)
        h1 = tf.nn.dropout(h1, keep_prob)

        # output layer: mean of the Bernoulli distribution, squashed to (0, 1)
        wo = tf.get_variable('wo', [h1.get_shape()[1], n_output], initializer=w_init)
        bo = tf.get_variable('bo', [n_output], initializer=b_init)
        y = tf.sigmoid(tf.matmul(h1, wo) + bo)

    return y


# Gateway
def autoencoder(x_hat, x, dim_img, dim_z, n_hidden, keep_prob):
    # encoding
    mu, sigma = gaussian_MLP_encoder(x_hat, n_hidden, dim_z, keep_prob)

    # sampling by re-parameterization technique: z = mu + sigma * eps with
    # eps ~ N(0, I), so gradients can flow back through mu and sigma
    z = mu + sigma * tf.random_normal(tf.shape(mu), 0, 1, dtype=tf.float32)

    # decoding; clip to avoid log(0) in the likelihood below
    y = bernoulli_MLP_decoder(z, n_hidden, dim_img, keep_prob)
    y = tf.clip_by_value(y, 1e-8, 1 - 1e-8)

    # loss
    # Bernoulli log-likelihood of x under the decoder output (the negative
    # binary cross-entropy), summed over pixels
    marginal_likelihood = tf.reduce_sum(x * tf.log(y) + (1 - x) * tf.log(1 - y), 1)
    # closed-form KL divergence between N(mu, sigma^2) and the prior N(0, I)
    KL_divergence = 0.5 * tf.reduce_sum(tf.square(mu) + tf.square(sigma) - tf.log(1e-8 + tf.square(sigma)) - 1, 1)

    marginal_likelihood = tf.reduce_mean(marginal_likelihood)
    KL_divergence = tf.reduce_mean(KL_divergence)

    # maximizing the ELBO is equivalent to minimizing its negative
    ELBO = marginal_likelihood - KL_divergence
    loss = -ELBO

    return y, z, loss, -marginal_likelihood, KL_divergence
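

# Reference note on the objective above: per example, loss = -(ELBO) with
#   ELBO = E_{q(z|x)}[log p(x|z)] - KL(q(z|x) || p(z)),
# and for q(z|x) = N(mu, diag(sigma^2)) with prior p(z) = N(0, I) the KL term
# has the closed form used in autoencoder():
#   KL = 0.5 * sum_j (mu_j^2 + sigma_j^2 - log(sigma_j^2) - 1)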


# Decoder-only graph: maps latent codes to images with dropout disabled
# (keep_prob=1.0), reusing the weights trained inside autoencoder()
def decoder(z, dim_img, n_hidden):
    y = bernoulli_MLP_decoder(z, n_hidden, dim_img, 1.0, reuse=True)
    return y
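

# --- Minimal usage sketch (illustrative; the hyperparameters, batch data,
# --- and training step below are assumptions, not part of this file) ---
if __name__ == '__main__':
    import numpy as np

    dim_img = 28 * 28   # assumes MNIST-like flattened images
    dim_z = 20          # illustrative latent dimensionality
    n_hidden = 500      # illustrative hidden-layer width

    x_hat = tf.placeholder(tf.float32, shape=[None, dim_img])  # encoder input
    x = tf.placeholder(tf.float32, shape=[None, dim_img])      # reconstruction target
    keep_prob = tf.placeholder(tf.float32)

    y, z, loss, neg_marginal_likelihood, KL_divergence = \
        autoencoder(x_hat, x, dim_img, dim_z, n_hidden, keep_prob)
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

    # generation graph: decode prior samples with the (to-be-)trained weights
    z_in = tf.placeholder(tf.float32, shape=[None, dim_z])
    x_gen = decoder(z_in, dim_img, n_hidden)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # stand-in for a real MNIST batch of pixel values in [0, 1]
        batch = np.random.rand(64, dim_img).astype(np.float32)
        _, l = sess.run([train_op, loss],
                        feed_dict={x_hat: batch, x: batch, keep_prob: 0.9})
        print('negative ELBO on one batch: %.3f' % l)

        # sample latent codes from the prior N(0, I) and decode them
        z_prior = np.random.randn(16, dim_z).astype(np.float32)
        samples = sess.run(x_gen, feed_dict={z_in: z_prior})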