# modeltraining.py (forked from imonban/OCT_prediction)
from sklearn.utils import class_weight
import os
from keras.preprocessing.sequence import pad_sequences
import keras.backend as K
import glob
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Embedding
from keras.layers import Input, LSTM, TimeDistributed
from keras.layers.convolutional import ZeroPadding2D
from keras import optimizers
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import ModelCheckpoint
from keras.models import model_from_json
from keras.layers.normalization import BatchNormalization
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn import metrics
from sklearn.preprocessing import MinMaxScaler
import random
import tensorflow as tf  # required by focal_loss below (tf.where / tf.equal)
import keras as k
import numpy as np
import keras_metrics as km
from dataprocessing import dataaugmentation, testing_data, testaugmentation, training_data

def weighted_categorical_crossentropy(weights):
    """
    A weighted version of keras.objectives.categorical_crossentropy

    Variables:
        weights: numpy array of shape (C,) where C is the number of classes

    Usage:
        weights = np.array([0.5, 2, 10])  # class 1 weighted 0.5x, class 2 weighted 2x, class 3 weighted 10x
        loss = weighted_categorical_crossentropy(weights)
        model.compile(loss=loss, optimizer='adam')
    """
    weights = K.variable(weights)

    def loss(y_true, y_pred):
        # scale predictions so that the class probabilities of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaNs and Infs
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        # per-class cross entropy, scaled by the class weights
        loss = y_true * K.log(y_pred) * weights
        loss = -K.sum(loss, -1)
        return loss

    return loss
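
# Hedged sanity check (not part of the original training flow): evaluate the
# weighted loss on a toy example. The weights and probabilities below are
# illustrative assumptions; with class 1 up-weighted 50x, a confident mistake
# on a class-1 sample dominates, giving roughly 50 * -log(0.1) ~ 115.
def _demo_weighted_loss():
    toy_weights = np.array([1.0, 50.0, 0.1])
    toy_loss = weighted_categorical_crossentropy(toy_weights)
    y_true = K.constant([[0.0, 1.0, 0.0]])   # true class is 1
    y_pred = K.constant([[0.8, 0.1, 0.1]])   # model is confident and wrong
    print(K.eval(toy_loss(y_true, y_pred)))  # ~[115.1]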

def categorical_focal_loss(gamma=2., alpha=.25):
    """
    Softmax version of focal loss.

        FL = sum_{c=1}^{m} -alpha * (1 - p_{o,c})^gamma * y_{o,c} * log(p_{o,c})

    where m = number of classes, c = class and o = observation

    Parameters:
        alpha -- the same as the weighing factor in balanced cross entropy
        gamma -- focusing parameter for the modulating factor (1 - p)

    Default values:
        gamma -- 2.0 as mentioned in the paper
        alpha -- 0.25 as mentioned in the paper

    References:
        Official paper: https://arxiv.org/pdf/1708.02002.pdf
        https://www.tensorflow.org/api_docs/python/tf/keras/backend/categorical_crossentropy

    Usage:
        model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)], metrics=["accuracy"], optimizer=adam)
    """
    def categorical_focal_loss_fixed(y_true, y_pred):
        """
        :param y_true: A tensor of the same shape as `y_pred`
        :param y_pred: A tensor resulting from a softmax
        :return: Output tensor.
        """
        # Scale predictions so that the class probabilities of each sample sum to 1
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        # Clip the prediction value to prevent NaNs and Infs
        epsilon = K.epsilon()
        y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
        # Calculate cross entropy
        cross_entropy = -y_true * K.log(y_pred)
        # Calculate focal loss
        loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
        # Sum the losses over the class axis
        return K.sum(loss, axis=1)

    return categorical_focal_loss_fixed
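
# Hedged sketch of what the focusing term does (illustrative values, not from
# the original experiments): at gamma = 2, an easy example with p = 0.9 has its
# cross entropy scaled by (1 - 0.9)^2 = 0.01, while a hard example with p = 0.1
# keeps (0.9)^2 = 0.81 of its cross entropy.
def _demo_categorical_focal_loss():
    fl = categorical_focal_loss(gamma=2., alpha=.25)
    y_true = K.constant([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    y_pred = K.constant([[0.9, 0.05, 0.05],   # easy: correct and confident
                         [0.1, 0.45, 0.45]])  # hard: wrong and confident
    print(K.eval(fl(y_true, y_pred)))  # easy loss ~0.0003, hard loss ~0.47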

def focal_loss(gamma=2., alpha=.25):
    # Binary focal loss variant: tf.where selects p_t for the positive and
    # negative cases separately.
    def focal_loss_fixed(y_true, y_pred):
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
               - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))

    return focal_loss_fixed
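
# Hedged check of the binary variant (assumed values): both entries below are
# "easy", so the focal terms stay small.
def _demo_binary_focal_loss():
    fl = focal_loss(gamma=2., alpha=.25)
    y_true = K.constant([[1.0, 0.0]])
    y_pred = K.constant([[0.9, 0.1]])
    print(K.eval(fl(y_true, y_pred)))  # small scalar, ~0.001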

def create_model(slen, num_features, n):
    model = Sequential()
    model.add(LSTM(n, return_sequences=True, activation='sigmoid', stateful=False,
                   input_shape=(slen, num_features), name='LSTM_1'))
    model.add(BatchNormalization())  # the old `mode=0` argument was removed in Keras 2
    #model.add(LSTM(1000, return_sequences=True, activation='sigmoid', name='LSTM_2'))
    model.add(Dropout(0.2))
    model.add(LSTM(n, return_sequences=True, activation='sigmoid', stateful=False, name='LSTM_2'))
    #model.add(Dropout(0.2))
    #model.add(LSTM(25, return_sequences=True, activation='sigmoid', name='LSTM_3'))
    #model.add(Activation("relu", name='Relu_activation'))
    #model.add(LeakyReLU(alpha=0.3, name='LeakyRelu_activation'))
    #model.add(TimeDistributed(Dense(1, activation='relu', name='dense_sigmoid'), name='TimeDis_main_output'))
    # Per-timestep 3-way softmax: classes 0 and 1 plus the padding class 2.
    model.add(TimeDistributed(Dense(3, activation='softmax', name='Softmax'), name='TimeDis_main_output'))
    #model.add(TimeDistributed(Dense(2, activation='relu', name='dense_sigmoid'), name='TimeDis_main_output'))
    #model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
    #              optimizer=optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0001),
    #              sample_weight_mode='temporal')
    return model
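
# Hedged instantiation sketch: the sequence length, feature count, and LSTM
# width below are placeholders, not values from the original experiments.
def _demo_create_model():
    demo = create_model(slen=24, num_features=10, n=64)
    demo.summary()  # two stacked LSTMs -> per-timestep 3-way softmax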

def model_training(df_cov9, df_miami, m, fold, n, flag, strm):
    # Train on all COV9 folds except the held-out one; test on the Miami cohort.
    train = df_cov9[df_cov9['Fold number'] != fold]
    train = train.reset_index(drop=True)
    test = df_miami
    #test = df_cov9[df_cov9['Fold number'] == fold]
    test = test.reset_index(drop=True)
    print(list(test))
    temp, patients_vec_train, patients_label_train, Seq_len = training_data(train, strm)
    temp, patients_vec_test, patients_label_test, Seq_len_test, row = testing_data(test, strm)
    # Pad every sequence to the longest visit history seen in either split.
    slen = max(max(Seq_len), max(Seq_len_test))
    print('Slen' + str(slen))
    X_train_aug, y_train_aug = dataaugmentation(patients_vec_train, patients_label_train)
    # Pre-pad features with zeros; padded label timesteps get the dummy class 2.
    X_train = pad_sequences(X_train_aug, slen, padding='pre', truncating='pre', value=0, dtype='float32')
    Y_train = pad_sequences(y_train_aug, slen, padding='pre', truncating='pre', value=2.)
    X_test = pad_sequences(patients_vec_test, slen, padding='pre', truncating='pre', value=0, dtype='float32')
    Y_test = pad_sequences(patients_label_test, slen, padding='pre', truncating='pre', value=2.)
    # One-hot encode the three classes (0, 1, and the padding class 2).
    Y_categorical_train = k.utils.to_categorical(Y_train, 3)
    Y_categorical_train = Y_categorical_train.reshape(Y_train.shape[0], Y_train.shape[1], 3)
    Y_categorical_test = k.utils.to_categorical(Y_test, 3)
    Y_categorical_test = Y_categorical_test.reshape(Y_test.shape[0], Y_test.shape[1], 3)
    y_train = Y_categorical_train
    y_test = Y_categorical_test
    # Checkpoint on validation precision of class 1.
    filepath = "./weights/Miami" + str(m) + "monweights-improvement-{epoch:02d}-{val_precision_1:.3f}.h5py"
    checkpoint = ModelCheckpoint(filepath, monitor='val_precision_1', verbose=1, save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    num_features = X_test.shape[2]
    print('num features: ')
    print(num_features)
    model = create_model(slen, num_features, n)
    model.save('OCT_model.h5')
    print('Model saved!!')
    try:
        # Balanced class weights from the padded test labels; fall back to a
        # hand-tuned triple if a class is missing. (The original assigned the
        # result back to the name `class_weight`, shadowing the imported module
        # and raising UnboundLocalError; renamed to `cw` to fix that.)
        wei = list(Y_test.reshape(X_test.shape[0] * slen))
        print(len(wei))
        cw = class_weight.compute_class_weight('balanced', np.unique(wei), wei)
        weights = np.array([cw[0], cw[1], cw[2]])
    except Exception:
        weights = np.array([1, 50, 0.1])
    print(weights)
    loss = weighted_categorical_crossentropy(weights)
    if flag == 1:
        model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)],
                      metrics=[km.categorical_precision(label=0), km.categorical_precision(label=1),
                               km.categorical_recall(label=0), km.categorical_recall(label=1)],
                      optimizer=optimizers.RMSprop(lr=0.00001, rho=0.9, epsilon=1e-08, decay=1e-6))
    else:
        model.compile(loss=[categorical_focal_loss(alpha=.25, gamma=2)],
                      metrics=[km.categorical_precision(label=0), km.categorical_precision(label=1),
                               km.categorical_recall(label=0), km.categorical_recall(label=1)],
                      optimizer=optimizers.Adam(lr=0.001, decay=1e-6))
    history = model.fit(X_train, y_train,
                        batch_size=64,
                        epochs=100,
                        validation_data=(X_test, y_test), callbacks=callbacks_list, shuffle=True)
    # Reload the most recently written checkpoint and evaluate it.
    list_of_files = glob.glob('./weights/*.h5py')
    latest_file = max(list_of_files, key=os.path.getctime)
    print(latest_file)
    bestmodel = create_model(slen, num_features, n)
    bestmodel.load_weights(latest_file)
    batch_size = 50
    preds_prob3mon = bestmodel.predict_proba(X_test, batch_size=batch_size)
    print(preds_prob3mon.shape)
    ind_preds3mon = preds_prob3mon.reshape(X_test.shape[0] * slen, 3)
    ind_Y_test3mon = y_test.reshape(X_test.shape[0] * slen, 3)
    # Score only real timesteps, i.e. rows where the padding class 2 is off.
    fpr, tpr, thresholds = roc_curve(np.array(ind_Y_test3mon[ind_Y_test3mon[:, 2] == 0, 1]),
                                     np.array(ind_preds3mon[ind_Y_test3mon[:, 2] == 0, 1]))
    roc_auc = auc(fpr, tpr)
    lr_precision, lr_recall, _ = precision_recall_curve(np.array(ind_Y_test3mon[ind_Y_test3mon[:, 2] == 0, 1]),
                                                        np.array(ind_preds3mon[ind_Y_test3mon[:, 2] == 0, 1]))
    lr_auc = auc(lr_recall, lr_precision)
    return fpr, tpr, roc_auc, ind_preds3mon, ind_Y_test3mon, lr_precision, lr_recall, lr_auc
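
# Hedged usage sketch: how model_training is wired up end to end. The file
# names, the 'Fold number' column, and the stream name are assumptions inferred
# from the code above; dataprocessing supplies the actual feature extraction.
#
#   import pandas as pd
#   df_cov9 = pd.read_csv('cov9_features.csv')    # hypothetical training cohort
#   df_miami = pd.read_csv('miami_features.csv')  # hypothetical external test cohort
#   fpr, tpr, roc_auc, preds, y_true, prec, rec, pr_auc = model_training(
#       df_cov9, df_miami, m=3, fold=0, n=64, flag=1, strm='OCT')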