This repository was archived by the owner on Apr 16, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: evaluation.py
82 lines (66 loc) · 2.73 KB
/
evaluation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import argparse
import importlib
import os
import pathlib
import sys
import time
from contextlib import redirect_stdout

import numpy as np
import torch
from osgeo import ogr, gdal, gdalconst
from sklearn.metrics import f1_score, accuracy_score, classification_report
from torch.utils.data import DataLoader
from tqdm import tqdm

from conf import default, general, paths
from utils.dataloader import TreePredDataSet
from utils.ops import load_dict
# Command-line interface for the evaluation script.
# NOTE(review): the original description was copy-pasted from the training
# script ("Train NUMBER_MODELS models..."); corrected to describe this file.
parser = argparse.ArgumentParser(
    description='Evaluate the predictions generated by the trained models'
)
parser.add_argument(  # Experiment number
    '-e', '--experiment',
    type=int,
    default=1,
    help='The number of the experiment'
)
parser.add_argument(  # Experiments path
    '-x', '--experiments-path',
    type=pathlib.Path,
    default=paths.PATH_EXPERIMENTS,
    help='The path to data generated by all experiments'
)
parser.add_argument(  # Base image used to generate the GeoTIFF prediction
    '-i', '--base-image',
    type=pathlib.Path,
    default=paths.PATH_IMG,
    help='The path to the base image used to generate the GeoTIFF prediction'
)
args = parser.parse_args()
# Directory layout inside the per-experiment folder.
# Only the strings that actually interpolate a value keep the f-prefix;
# the constant subdirectory names were needlessly written as f-strings.
exp_path = os.path.join(str(args.experiments_path), f'exp_{args.experiment}')
logs_path = os.path.join(exp_path, 'logs')
models_path = os.path.join(exp_path, 'models')
visual_path = os.path.join(exp_path, 'visual')
predicted_path = os.path.join(exp_path, 'predicted')
results_path = os.path.join(exp_path, 'results')
# Evaluation report written by the block below.
outfile = os.path.join(logs_path, f'eval_{args.experiment}.txt')
# Redirect every print() in this section to the per-experiment log file.
# The original code used `with open(outfile, 'w') as sys.stdout`, which
# permanently rebinds sys.stdout to a *closed* file object once the block
# exits, breaking any later print; redirect_stdout restores it on exit.
with open(outfile, 'w') as log_file, redirect_stdout(log_file):
    # Flattened per-pixel prediction and ground-truth label arrays.
    pred = np.load(os.path.join(predicted_path, 'pred.npy')).flatten()
    label = np.load(os.path.join(paths.PREPARED_PATH, f'{general.PREFIX_LABEL}_test.npy')).flatten()
    # Mapping produced during data preparation (original class -> remapped id).
    remap_dict = load_dict(os.path.join(paths.PREPARED_PATH, 'map.data'))
    print(remap_dict)
    # Keep only pixels that are not discarded and are non-background (class 0)
    # in BOTH the label and the prediction.
    keep_not_discarded = label != general.DISCARDED_CLASS
    keep_label_fg = label != 0
    keep_pred_fg = pred != 0
    keep = np.all([keep_not_discarded, keep_label_fg, keep_pred_fg], axis=0)
    pred = pred[keep]
    label = label[keep]
    # Per-class one-vs-rest confusion counts and derived metrics.
    for class_id in tqdm(range(general.N_CLASSES)):
        is_label = label == class_id
        is_pred = pred == class_id
        tp = int((is_pred & is_label).sum())
        tn = int((~is_pred & ~is_label).sum())
        fp = int((is_pred & ~is_label).sum())
        fn = int((~is_pred & is_label).sum())
        # Guard the divisions: a class absent from labels/predictions would
        # otherwise produce a RuntimeWarning and print NaN. Report 0.0 instead.
        recall = tp / (tp + fn) if tp + fn else 0.0
        precision = tp / (tp + fp) if tp + fp else 0.0
        f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
        total = tp + tn + fp + fn
        acc = (tp + tn) / total if total else 0.0
        print(f'Class {class_id}: Accuracy={100*acc:.2f}, F1-Score={100*f1:.2f}, Precision={100*precision:.2f}, Recall={100*recall:.2f}, N of Labels samples: {tp + fn}, N of Predicted samples: {tp + fp}')