-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest1_getprob.py
89 lines (73 loc) · 3.73 KB
/
test1_getprob.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import argparse
import datetime
import os

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.metrics import accuracy_score

from models.model_without_class_token_feat import SPCTSegNet, NaivePCTSegNet
from tools.tool import save_result_txt
def parse_args():
    """Parse command-line options for point-cloud segmentation testing."""
    ap = argparse.ArgumentParser('Point Cloud Transformer Training')
    ap.add_argument('--model', type=str, default='SPCTSegNet',
                    help='model name [default: SPCTSegNet],',
                    choices=['SPCTSegNet', 'NaivePCTSegNet'])
    ap.add_argument('--part_num', type=int, default=18,
                    help='Num of segmentation part')
    ap.add_argument('--gpu', type=str, default='0',
                    help='GPU to use [default: GPU 0]')
    ap.add_argument('--data_path', type=str, default=r'./dataset/test.txt',
                    help='Root path of dataset')
    ap.add_argument('--load_checkpoint_path', type=str,
                    default=r'./checkpoints/Best_checkpoint.pth',
                    help='Path to loading checkpoints')
    return ap.parse_args()
def test(model, xyz_test):
print(f"开始测试: {datetime.datetime.now()}")
batch_size = 500
model.eval()
batches = xyz_test.shape[0] // batch_size
if xyz_test.shape[0] % batch_size != 0:
batches += 1
preds = []
with torch.no_grad():
for i in range(batches):
xyz = xyz_test[i * batch_size : (i + 1) * batch_size]
xyz = xyz.to(device)
output = model(xyz).detach()
probs = F.softmax(output, dim=1)
preds.append(probs)
preds_cpu = [p.detach().cpu() for p in preds]
preds_np = np.concatenate(preds_cpu, axis=0)
preds_np = preds_np.reshape(preds_np.shape[0], preds_np.shape[1])
lables = preds_np.argmax(axis=1)
print(f"结束测试: {datetime.datetime.now()}")
return lables.flatten(), preds_np
if __name__ == '__main__':
    args = parse_args()

    # Test data: columns [:-1] are the x/y/z coordinates, last column is the label.
    data = pd.read_csv(args.data_path, header=None).values
    xyz = data[:, :-1]
    y = data[:, -1]

    device = torch.device("cuda:" + args.gpu if torch.cuda.is_available() else "cpu")

    # Per-axis normalisation statistics (presumably from the training set — confirm).
    mean = [4.11235828e+05, 3.36801308e+06, 4.21031434e+02]
    std = [1666.31900171, 1161.90651266, 68.79294942]
    xyz = (xyz - mean) / std

    # Stage-1 base-learner probabilities used as stacking features, one file
    # per learner; each loaded as (N, num_classes) then given a learner axis.
    data_path = './dataset/'
    stage1_files = ('gbdt_stage1_test.csv', 'nn_stage1_test.csv',
                    'rf_stage1_test.csv', 'xgb_stage1_test.csv')
    base_preds = [
        np.expand_dims(
            pd.read_csv(os.path.join(data_path, 'stacking_features', name),
                        header=None, skiprows=1).values,
            axis=1)
        for name in stage1_files
    ]

    # Repeat the normalised coordinates once per base learner, then append the
    # learners' probability vectors: (N, 4, 3 + num_classes).
    blp = np.repeat(xyz[:, np.newaxis, :], len(stage1_files), axis=1)
    blf = np.concatenate(base_preds, axis=1)
    blf = np.concatenate((blp, blf), axis=2)
    bls = torch.tensor(blf.astype(np.float32)).permute(0, 2, 1)  # (N, C, 4)

    # Resolve the model class by name instead of eval() on user input.
    model_classes = {'SPCTSegNet': SPCTSegNet, 'NaivePCTSegNet': NaivePCTSegNet}
    model = model_classes[args.model](args.part_num).to(device)
    checkpoint = torch.load(args.load_checkpoint_path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])

    # De-normalise so the saved coordinates are back in the original frame.
    xyz = xyz * std + mean

    pred, probs = test(model, bls)

    os.makedirs('results', exist_ok=True)  # np.savetxt does not create dirs
    probs_out = np.concatenate([data, probs], axis=1)
    np.savetxt(os.path.join('results', 'predict_tianfu_probs.txt'),
               probs_out, delimiter=',')

    acc = accuracy_score(y.flatten(), pred)  # (y_true, y_pred) order
    print(f'Test acc is {acc:.6f}')
    save_result_txt(xyz, pred, os.path.join('results', 'predict_tianfu_lables.txt'))