-
Notifications
You must be signed in to change notification settings - Fork 78
/
Copy pathOOD_Regression_Mahalanobis.py
83 lines (75 loc) · 3.89 KB
/
OOD_Regression_Mahalanobis.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
"""
Created on Sun Oct 21 2018
@author: Kimin Lee
"""
from __future__ import print_function
import numpy as np
import os
import lib_regression
import argparse
from sklearn.linear_model import LogisticRegressionCV
parser = argparse.ArgumentParser(description='PyTorch code: Mahalanobis detector')
parser.add_argument('--net_type', required=True, help='resnet | densenet')
args = parser.parse_args()
print(args)
def main():
    """Train and evaluate a logistic-regression OOD detector on Mahalanobis scores.

    For each in-distribution dataset, sweep the pre-computed Mahalanobis
    characteristic files (one per input-noise magnitude), pick the magnitude
    with the best validation TNR, and report that regressor's test-set
    detection metrics against each out-of-distribution dataset.

    Reads the module-level ``args.net_type`` to locate the feature files under
    ``./output/<net_type>_<dataset>/``; prints all results to stdout.
    """
    # initial setup
    dataset_list = ['cifar10', 'cifar100', 'svhn']
    # one characteristics file per input-noise magnitude used when the
    # Mahalanobis features were generated
    score_list = ['Mahalanobis_0.0', 'Mahalanobis_0.01', 'Mahalanobis_0.005',
                  'Mahalanobis_0.002', 'Mahalanobis_0.0014', 'Mahalanobis_0.001',
                  'Mahalanobis_0.0005']

    # train and measure the performance of Mahalanobis detector
    list_best_results, list_best_results_index = [], []
    for dataset in dataset_list:
        print('In-distribution: ', dataset)
        outf = './output/' + args.net_type + '_' + dataset + '/'
        # when SVHN is in-distribution, CIFAR-10 replaces it in the OOD list
        out_list = ['svhn', 'imagenet_resize', 'lsun_resize']
        if dataset == 'svhn':
            out_list = ['cifar10', 'imagenet_resize', 'lsun_resize']

        list_best_results_out, list_best_results_index_out = [], []
        for out in out_list:
            print('Out-of-distribution: ', out)
            best_tnr, best_result, best_index = 0, 0, 0
            for score in score_list:
                total_X, total_Y = lib_regression.load_characteristics(score, dataset, out, outf)
                X_val, Y_val, X_test, Y_test = lib_regression.block_split(total_X, total_Y, out)
                # validation layout assumed: [:1000] in-distribution rows,
                # [1000:] OOD rows; first halves train the regressor, second
                # halves select the best noise magnitude — TODO confirm against
                # lib_regression.block_split
                X_train = np.concatenate((X_val[:500], X_val[1000:1500]))
                Y_train = np.concatenate((Y_val[:500], Y_val[1000:1500]))
                X_val_for_test = np.concatenate((X_val[500:1000], X_val[1500:]))
                Y_val_for_test = np.concatenate((Y_val[500:1000], Y_val[1500:]))
                lr = LogisticRegressionCV(n_jobs=-1).fit(X_train, Y_train)
                results = lib_regression.detection_performance(lr, X_val_for_test, Y_val_for_test, outf)
                # keep the noise magnitude with the best validation TNR and
                # score its regressor on the held-out test split
                if best_tnr < results['TMP']['TNR']:
                    best_tnr = results['TMP']['TNR']
                    best_index = score
                    best_result = lib_regression.detection_performance(lr, X_test, Y_test, outf)
            list_best_results_out.append(best_result)
            list_best_results_index_out.append(best_index)
        list_best_results.append(list_best_results_out)
        list_best_results_index.append(list_best_results_index_out)

    # print the results
    mtypes = ['TNR', 'AUROC', 'DTACC', 'AUIN', 'AUOUT']
    for count_in, in_list in enumerate(list_best_results):
        print('in_distribution: ' + dataset_list[count_in] + '==========')
        out_list = ['svhn', 'imagenet_resize', 'lsun_resize']
        if dataset_list[count_in] == 'svhn':
            out_list = ['cifar10', 'imagenet_resize', 'lsun_resize']
        for count_out, results in enumerate(in_list):
            print('out_distribution: '+ out_list[count_out])
            for mtype in mtypes:
                print(' {mtype:6s}'.format(mtype=mtype), end='')
            print('\n{val:6.2f}'.format(val=100.*results['TMP']['TNR']), end='')
            print(' {val:6.2f}'.format(val=100.*results['TMP']['AUROC']), end='')
            print(' {val:6.2f}'.format(val=100.*results['TMP']['DTACC']), end='')
            print(' {val:6.2f}'.format(val=100.*results['TMP']['AUIN']), end='')
            print(' {val:6.2f}\n'.format(val=100.*results['TMP']['AUOUT']), end='')
            print('Input noise: ' + list_best_results_index[count_in][count_out])
            print('')
# Script entry point: run the full detector sweep when executed directly.
if __name__ == '__main__':
    main()