-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmetrics.py
83 lines (70 loc) · 3.12 KB
/
metrics.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
def iou(y_true, y_pred):
    """Intersection-over-Union metric, computed eagerly in numpy.

    The numpy computation is wrapped with ``tf.numpy_function`` so the
    metric can be attached to a Keras model and fed tensor inputs.
    Returns a float32 scalar tensor.
    """
    def _np_iou(truth, pred):
        # Overlap between prediction and ground truth.
        overlap = (truth * pred).sum()
        # |truth| + |pred| - overlap; `smooth` guards against 0/0.
        union_area = truth.sum() + pred.sum() - overlap
        score = (overlap + smooth) / (union_area + smooth)
        return score.astype(np.float32)

    return tf.numpy_function(_np_iou, [y_true, y_pred], tf.float32)
smooth = K.epsilon()
def dice_coef(y_true, y_pred):
    """Dice similarity coefficient between two masks.

    Both inputs are flattened, then the score
    (2*overlap + smooth) / (|y_true| + |y_pred| + smooth) is returned.
    """
    flat_true = tf.keras.layers.Flatten()(y_true)
    flat_pred = tf.keras.layers.Flatten()(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    denominator = K.sum(flat_true) + K.sum(flat_pred)
    return (2. * overlap + smooth) / (denominator + smooth)
def dice_loss(y_true, y_pred):
    """Dice loss: complement of the Dice coefficient (0 == perfect overlap)."""
    score = dice_coef(y_true, y_pred)
    return 1.0 - score
def logcoshDice(y_true, y_pred):
    """Log-cosh Dice loss: log(cosh(1 - dice)), a smooth variant of Dice loss."""
    loss = 1 - dice_coef(y_true, y_pred)
    # cosh(x) = (e^x + e^-x) / 2
    return K.log((K.exp(loss) + K.exp(-loss)) / 2.0)
# Performance metrics: Dice score coefficient, IoU, recall (sensitivity), precision, F1
def auc(y_true, y_pred):
    """Compute segmentation metrics from binarized (rounded) masks.

    NOTE(review): despite the name, this does NOT compute ROC-AUC. It
    thresholds both arrays at 0.5 (clip to [0, 1], then round), builds the
    confusion-matrix counts, and returns
    [dice, IoU, recall, precision, F1] as a list. ``K.epsilon()`` is added
    to numerators and denominators to avoid 0/0 on empty masks.
    """
    eps = K.epsilon()

    pred_pos = np.round(np.clip(y_pred, 0, 1))
    pred_neg = 1 - pred_pos
    truth_pos = np.round(np.clip(y_true, 0, 1))  # ground truth
    truth_neg = 1 - truth_pos

    tp = np.sum(truth_pos * pred_pos)
    tn = np.sum(truth_neg * pred_neg)
    fp = np.sum(truth_neg * pred_pos)
    fn = np.sum(truth_pos * pred_neg)

    recall = (tp + eps) / (tp + fn + eps)       # sensitivity / TPR
    specificity = (tn + eps) / (tn + fp + eps)  # TNR (computed, not returned)
    precision = (tp + eps) / (tp + fp + eps)
    f1 = (2 * (precision * recall)) / (precision + recall)
    jaccard = (tp + eps) / (tp + fn + fp + eps)         # intersection over union
    dice = (2 * tp + eps) / (2 * tp + fn + fp + eps)    # dice score

    return [dice, jaccard, recall, precision, f1]
def focal_loss_non_weight(gamma=2., alpha=4.):
    """Factory returning a (non-class-weighted) focal loss function.

    FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t)

    "Focal Loss for Dense Object Detection"
    https://arxiv.org/abs/1708.02002

    Keyword Arguments:
        gamma {float} -- focusing parameter (default: {2.0})
        alpha {float} -- scaling factor (default: {4.0})

    Returns:
        callable(y_true, y_pred) -> scalar loss tensor.
    """
    gamma = float(gamma)
    alpha = float(alpha)

    def focal_loss_fixed(y_true, y_pred):
        """Focal loss for multi-class outputs.

        Notice: y_pred is expected to be a probability tensor (after
        softmax) of shape [batch_size, num_cls]; y_true is the matching
        one-hot label tensor. The gradient here is d(FL)/d(p_t), not
        d(FL)/d(x) as described in the paper:
        d(FL)/d(p_t) * [p_t * (1 - p_t)] = d(FL)/d(x).
        """
        eps = 1.e-9
        truth = tf.convert_to_tensor(y_true, tf.float32)
        probs = tf.add(tf.convert_to_tensor(y_pred, tf.float32), eps)  # avoid log(0)
        cross_entropy = tf.multiply(truth, -tf.math.log(probs))
        # (1 - p)^gamma modulator, masked to the true class by y_true.
        modulator = tf.multiply(truth, tf.pow(tf.subtract(1., probs), gamma))
        fl = tf.multiply(alpha, tf.multiply(modulator, cross_entropy))
        # For one-hot labels only the true class is nonzero, so the max over
        # classes picks out the per-sample loss; then average over the batch.
        per_sample = tf.reduce_max(fl, axis=1)
        return tf.reduce_mean(per_sample)

    return focal_loss_fixed