Commit 10e833a
remove batch normalization because we don't use it in these examples and it hasn't been recently tested
Bethany Lusch authored and committed Oct 13, 2019
1 parent 1a5cc56 commit 10e833a
Showing 2 changed files with 8 additions and 24 deletions.
helperfns.py: 3 changes (0 additions, 3 deletions)
@@ -329,9 +329,6 @@ def set_defaults(params):
     if 'act_type' not in params:
         print("setting default: activation function is ReLU")
         params['act_type'] = 'relu'
-    if 'batch_flag' not in params:
-        print("setting default: no batch normalization")
-        params['batch_flag'] = 0
 
     if 'num_evals' not in params:
         raise KeyError("Error, must give number of evals: num_evals")
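The upshot of this hunk: set_defaults no longer injects a 'batch_flag' key, so any code that still reads params['batch_flag'] will now raise a KeyError. Below is a minimal self-contained sketch of the new defaulting behavior; it mirrors the pattern above rather than importing helperfns, since the full function requires more keys than this hunk shows, and the params dict here is hypothetical.

# Sketch of the defaulting pattern after this commit (mirrors, does not
# import, helperfns.set_defaults).
def set_defaults_sketch(params):
    if 'act_type' not in params:
        print("setting default: activation function is ReLU")
        params['act_type'] = 'relu'
    # note: no 'batch_flag' default anymore, so callers that read
    # params['batch_flag'] without setting it would hit a KeyError
    if 'num_evals' not in params:
        raise KeyError("Error, must give number of evals: num_evals")

params = {'num_evals': 2}  # hypothetical minimal settings dict
set_defaults_sketch(params)
print('batch_flag' in params)  # False: the default is gone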
networkarch.py: 29 changes (8 additions, 21 deletions)
@@ -105,15 +105,14 @@ def encoder(widths, dist_weights, dist_biases, scale, num_shifts_max, first_guess):
     return x, weights, biases
 
 
-def encoder_apply(x, weights, biases, act_type, batch_flag, shifts_middle, name='E', num_encoder_weights=1):
+def encoder_apply(x, weights, biases, act_type, shifts_middle, name='E', num_encoder_weights=1):
     """Apply an encoder to data x.
 
     Arguments:
         x -- placeholder for input
         weights -- dictionary of weights
         biases -- dictionary of biases
         act_type -- string for activation type for nonlinear layers (i.e. sigmoid, relu, or elu)
-        batch_flag -- 0 if no batch_normalization, 1 if batch_normalization
         shifts_middle -- number of shifts (steps) in x to apply encoder to for linearity loss
         name -- string for prefix on weight matrices (default 'E' for encoder)
         num_encoder_weights -- number of weight matrices (layers) in encoder network (default 1)
@@ -136,19 +135,18 @@ def encoder_apply(x, weights, biases, act_type, batch_flag, shifts_middle, name='E', num_encoder_weights=1):
         else:
             x_shift = tf.squeeze(x[shift, :, :])
         y.append(
-            encoder_apply_one_shift(x_shift, weights, biases, act_type, batch_flag, name, num_encoder_weights))
+            encoder_apply_one_shift(x_shift, weights, biases, act_type, name, num_encoder_weights))
     return y
 
 
-def encoder_apply_one_shift(prev_layer, weights, biases, act_type, batch_flag, name='E', num_encoder_weights=1):
+def encoder_apply_one_shift(prev_layer, weights, biases, act_type, name='E', num_encoder_weights=1):
     """Apply an encoder to data for only one time step (shift).
 
     Arguments:
         prev_layer -- input for a particular time step (shift)
         weights -- dictionary of weights
         biases -- dictionary of biases
         act_type -- string for activation type for nonlinear layers (i.e. sigmoid, relu, or elu)
-        batch_flag -- 0 if no batch_normalization, 1 if batch_normalization
         name -- string for prefix on weight matrices (default 'E' for encoder)
         num_encoder_weights -- number of weight matrices (layers) in encoder network (default 1)
@@ -160,8 +158,6 @@ def encoder_apply_one_shift(prev_layer, weights, biases, act_type, batch_flag, name='E', num_encoder_weights=1):
     """
     for i in np.arange(num_encoder_weights - 1):
         prev_layer = tf.matmul(prev_layer, weights['W%s%d' % (name, i + 1)]) + biases['b%s%d' % (name, i + 1)]
-        if batch_flag:
-            prev_layer = tf.contrib.layers.batch_norm(prev_layer)
         if act_type == 'sigmoid':
             prev_layer = tf.sigmoid(prev_layer)
         elif act_type == 'relu':
@@ -173,9 +169,6 @@ def encoder_apply_one_shift(prev_layer, weights, biases, act_type, batch_flag, name='E', num_encoder_weights=1):
     final = tf.matmul(prev_layer, weights['W%s%d' % (name, num_encoder_weights)]) + biases[
         'b%s%d' % (name, num_encoder_weights)]
 
-    if batch_flag:
-        final = tf.contrib.layers.batch_norm(final)
-
     return final
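With the batch-norm branches deleted, every hidden layer in encoder_apply_one_shift is just an affine map followed by the chosen activation, and the final layer stays linear. A self-contained NumPy sketch of that stack; the layer widths and the relu choice are hypothetical:

import numpy as np

def encoder_stack_sketch(x, Ws, bs):
    """Affine layer plus ReLU for all but the last weight matrix; the final
    layer stays linear, mirroring encoder_apply_one_shift after this commit."""
    h = x
    for W, b in zip(Ws[:-1], bs[:-1]):
        h = np.maximum(h @ W + b, 0.0)  # the 'relu' branch of act_type
    return h @ Ws[-1] + bs[-1]          # last layer: no activation, no batch norm

rng = np.random.default_rng(0)
shapes = [(2, 16), (16, 16), (16, 3)]   # hypothetical widths
Ws = [rng.standard_normal(s) for s in shapes]
bs = [np.zeros(s[1]) for s in shapes]
print(encoder_stack_sketch(rng.standard_normal((5, 2)), Ws, bs).shape)  # (5, 3)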


@@ -210,15 +203,14 @@ def decoder(widths, dist_weights, dist_biases, scale, name='D', first_guess=0):
     return weights, biases
 
 
-def decoder_apply(prev_layer, weights, biases, act_type, batch_flag, num_decoder_weights):
+def decoder_apply(prev_layer, weights, biases, act_type, num_decoder_weights):
     """Apply a decoder to data prev_layer
 
     Arguments:
         prev_layer -- input to decoder network
         weights -- dictionary of weights
         biases -- dictionary of biases
         act_type -- string for activation type for nonlinear layers (i.e. sigmoid, relu, or elu)
-        batch_flag -- 0 if no batch_normalization, 1 if batch_normalization
         num_decoder_weights -- number of weight matrices (layers) in decoder network
     Returns:
@@ -229,8 +221,6 @@ def decoder_apply(prev_layer, weights, biases, act_type, batch_flag, num_decoder_weights):
     """
     for i in np.arange(num_decoder_weights - 1):
         prev_layer = tf.matmul(prev_layer, weights['WD%d' % (i + 1)]) + biases['bD%d' % (i + 1)]
-        if batch_flag:
-            prev_layer = tf.contrib.layers.batch_norm(prev_layer)
         if act_type == 'sigmoid':
             prev_layer = tf.sigmoid(prev_layer)
         elif act_type == 'relu':
@@ -421,7 +411,7 @@ def omega_net_apply_one(params, ycoords, weights, biases, name):
         None
     """
 
-    omegas = encoder_apply_one_shift(ycoords, weights, biases, params['act_type'], params['batch_flag'], name=name,
+    omegas = encoder_apply_one_shift(ycoords, weights, biases, params['act_type'], name=name,
                                      num_encoder_weights=params['num_omega_weights'])
     return omegas

@@ -453,8 +443,7 @@ def create_koopman_net(params):
                                       dist_biases=params['dist_biases'][0:depth + 1], scale=params['scale'],
                                       num_shifts_max=max_shifts_to_stack, first_guess=params['first_guess'])
     params['num_encoder_weights'] = len(weights)
-    g_list = encoder_apply(x, weights, biases, params['act_type'], params['batch_flag'],
-                           shifts_middle=params['shifts_middle'],
+    g_list = encoder_apply(x, weights, biases, params['act_type'], shifts_middle=params['shifts_middle'],
                            num_encoder_weights=params['num_encoder_weights'])
 
     # g_list_omega is list of omegas, one entry for each middle_shift of x (like g_list)
@@ -475,8 +464,7 @@ def create_koopman_net(params):
     # y[0] is x[0,:,:] encoded and then decoded (no stepping forward)
     encoded_layer = g_list[0]
     params['num_decoder_weights'] = depth + 1
-    y.append(decoder_apply(encoded_layer, weights, biases, params['act_type'], params['batch_flag'],
-                           params['num_decoder_weights']))
+    y.append(decoder_apply(encoded_layer, weights, biases, params['act_type'], params['num_decoder_weights']))
 
     # g_list_omega[0] is for x[0,:,:], pairs with g_list[0]=encoded_layer
     advanced_layer = varying_multiply(encoded_layer, omegas, params['delta_t'], params['num_real'],
@@ -485,8 +473,7 @@ def create_koopman_net(params):
     for j in np.arange(max(params['shifts'])):
         # considering penalty on subset of yk+1, yk+2, yk+3, ...
         if (j + 1) in params['shifts']:
-            y.append(decoder_apply(advanced_layer, weights, biases, params['act_type'], params['batch_flag'],
-                                   params['num_decoder_weights']))
+            y.append(decoder_apply(advanced_layer, weights, biases, params['act_type'], params['num_decoder_weights']))
 
         omegas = omega_net_apply(params, advanced_layer, weights, biases)
         advanced_layer = varying_multiply(advanced_layer, omegas, params['delta_t'], params['num_real'],
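The loop in this hunk advances the latent state once per iteration but decodes only at the steps listed in params['shifts']. A toy, self-contained sketch of that control flow; the step and decode lambdas are stand-ins for varying_multiply/omega_net_apply and decoder_apply, not the repository's functions:

import numpy as np

def rollout_sketch(encoded, step, decode, shifts):
    """Decode the initial code, then advance the latent state one step at a
    time, decoding only at the requested shifts, as in the loop above."""
    y = [decode(encoded)]     # y[0]: encode-then-decode, no stepping forward
    advanced = step(encoded)
    for j in np.arange(max(shifts)):
        if (j + 1) in shifts:
            y.append(decode(advanced))
        advanced = step(advanced)
    return y

y = rollout_sketch(np.ones(3),
                   step=lambda g: 0.9 * g,         # stand-in for varying_multiply
                   decode=lambda g: float(g.sum()),  # stand-in for decoder_apply
                   shifts=[1, 3])
print(len(y))  # 3: the reconstruction plus predictions at shifts 1 and 3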
