Call_back.py
import numpy as np
import tensorflow.keras as keras
import tensorflow as tf
class EarlyStoppingAtMinLoss(keras.callbacks.Callback):
    """
    Stop training when the loss is at its minimum, i.e. when the loss stops decreasing.
    Arguments:
        weight_path: Path used to save the best weights found so far.
        patience: Number of epochs to wait after the minimum has been hit. After this
            many epochs without improvement, training stops.
    """
    def __init__(self, weight_path='gen.h5', patience=10):
        super(EarlyStoppingAtMinLoss, self).__init__()
        self.patience = patience
        # best_weights stores the weights at which the minimum loss occurs.
        self.best_weights = None
        self.weight_path = weight_path

    def on_train_begin(self, logs=None):
        # Number of epochs waited since the loss was last at its minimum.
        self.wait = 0
        # The epoch at which training stops.
        self.stopped_epoch = 0
        # Initialize the best loss as infinity.
        self.best = np.inf

    def on_epoch_end(self, epoch=0, logs=None, name='loss'):
        current = logs.get(name)
        if np.less(current, self.best):
            self.best = current
            self.wait = 0
            # Record and save the best weights when the current result is better (lower).
            self.best_weights = self.model.get_weights()
            self.model.save_weights(self.weight_path)
        else:
            self.wait += 1
            if self.wait >= self.patience:
                self.stopped_epoch = epoch
                # self.model.stop_training = True
                print("Restoring model weights from the end of the best epoch.")
                self.model.set_weights(self.best_weights)

    def on_train_end(self, logs=None):
        if self.stopped_epoch > 0:
            print("Epoch %05d: early stopping" % (self.stopped_epoch + 1))
class CustomLearningRateScheduler(keras.callbacks.Callback):
    """
    Learning rate scheduler which sets the learning rate according to a schedule.
    Arguments:
        schedule: a function that takes an epoch index (integer, indexed from 0)
            and the current learning rate as inputs and returns a new learning
            rate as output (float).
    """
    def __init__(self, schedule=None):
        super(CustomLearningRateScheduler, self).__init__()
        self.schedule = schedule

    def on_epoch_begin(self, epoch=0, logs=None):
        if not hasattr(self.model.optimizer, "learning_rate"):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        # Get the current learning rate from the model's optimizer.
        lr = float(tf.keras.backend.get_value(self.model.optimizer.learning_rate))
        # Call the schedule function to get the scheduled learning rate.
        scheduled_lr = self.schedule(epoch, lr)
        # Set the value back on the optimizer before this epoch starts.
        tf.keras.backend.set_value(self.model.optimizer.learning_rate, scheduled_lr)
        print("\nEpoch %05d: Learning rate is %6.4f." % (epoch, scheduled_lr))
LR_SCHEDULE = [
    # (epoch to start, learning rate) tuples
    (3, 0.05),
    (6, 0.01),
    (9, 0.005),
    (12, 0.001),
]

def lr_schedule(epoch, lr):
    """
    Helper function to retrieve the scheduled learning rate based on the epoch.
    """
    if epoch < LR_SCHEDULE[0][0] or epoch > LR_SCHEDULE[-1][0]:
        return lr
    for i in range(len(LR_SCHEDULE)):
        if epoch == LR_SCHEDULE[i][0]:
            return LR_SCHEDULE[i][1]
    return lr
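# Illustrative usage only (not part of the original module): `lr_schedule` is one
# schedule function that can be passed to CustomLearningRateScheduler; with the
# LR_SCHEDULE table above, the learning rate steps down at epochs 3, 6, 9 and 12
# (`model`, `x_train` and `y_train` are assumed to exist).
#
#   scheduler = CustomLearningRateScheduler(lr_schedule)
#   model.fit(x_train, y_train, epochs=15, callbacks=[scheduler])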
def named_logs(model, logs):
    """
    Return a single-entry dict mapping the first argument to the given logs value.
    """
    return {model: logs}
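# Illustrative usage only (not part of the original module): in a manual training
# loop, the stand-alone TensorBoard callback expects a dict of named scalars, so a
# per-batch loss from `train_on_batch` could be forwarded like this (the key name
# and call pattern are assumptions, not taken from the original code):
#
#   loss = model.train_on_batch(x_batch, y_batch)
#   tensorboard_.on_epoch_end(step, named_logs('loss', loss))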
def tensorboard_summary(predictions, hdrs, writer, model, step_count):
    """
    Write image and per-layer weight-histogram summaries for one step using the
    TF 1.x ``tf.contrib.summary`` API.
    """
    with writer.as_default(), tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.image("G_ref", hdrs, step=step_count)
        tf.contrib.summary.image("results", predictions, step=step_count)
        # tf.contrib.summary.image("under_exposed", un, step=step_count)
        # tf.contrib.summary.image("over_exposed", ov, step=step_count)
        for layer in model.layers:
            if not layer.weights:
                continue
            for weight, weights_numpy_array in zip(layer.weights, layer.get_weights()):
                weights_name = weight.name.replace(":", "_")
                tf.contrib.summary.histogram(weights_name, weights_numpy_array, step=step_count)
def record(model, bs, result_path):
    """
    Create the summary file writer, TensorBoard callback, and early-stopping
    callback before training.
    """
    tensorboard_ = keras.callbacks.TensorBoard(log_dir=result_path + 'tensorboard/', batch_size=bs)
    # lr_auto_gen = keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.2, cooldown=0, patience=0, mode="min", min_lr=.00001)
    early_gen = EarlyStoppingAtMinLoss(weight_path='model1.h5', patience=2)
    # lr_auto_gen.set_model(model)
    tensorboard_.set_model(model)
    early_gen.set_model(model)
    writer = tf.contrib.summary.create_file_writer(result_path + 'tfsummary/')
    return writer, tensorboard_, early_gen
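# Illustrative usage only (not part of the original module): a minimal sketch of a
# manual training loop wiring the pieces above together. The model, data batches,
# epoch count and paths are all assumptions for illustration.
#
#   writer, tensorboard_, early_gen = record(model, bs=8, result_path='./runs/')
#   early_gen.on_train_begin()
#   for epoch in range(epochs):
#       for x_batch, y_batch in batches:
#           loss = model.train_on_batch(x_batch, y_batch)
#       tensorboard_.on_epoch_end(epoch, named_logs('loss', loss))
#       early_gen.on_epoch_end(epoch, logs={'loss': loss}, name='loss')
#   early_gen.on_train_end()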