# main_SensatUrban.py (forked from SuperKX/SensatUrban)
from os.path import join, exists, dirname, abspath
from RandLANet import Network
from tester_SensatUrban import ModelTester
from helper_ply import read_ply
from tool import ConfigSensatUrban as cfg
from tool import DataProcessing as DP
from tool import Plot
import tensorflow as tf
import numpy as np
import time, pickle, argparse, glob, os, shutil
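
# RandLA-Net training/testing entry point for the SensatUrban dataset. The class below
# loads the grid-sub-sampled point clouds and their KD-trees, generates spatially regular
# crops, and feeds them through a TF1 tf.data pipeline; the __main__ block dispatches
# between the train, test and visualization modes.
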
class SensatUrban:
    def __init__(self):
        self.name = 'SensatUrban'
        root_path = '/home/xuekai/桌面/Randlanet/Dataset'  # path to the dataset
        self.path = join(root_path, self.name)
        self.label_to_names = {0: 'Ground', 1: 'High Vegetation', 2: 'Buildings', 3: 'Walls',
                               4: 'Bridge', 5: 'Parking', 6: 'Rail', 7: 'Traffic Roads', 8: 'Street Furniture',
                               9: 'Cars', 10: 'Footpath', 11: 'Bikes', 12: 'Water'}
        self.num_classes = len(self.label_to_names)
        self.label_values = np.sort([k for k, v in self.label_to_names.items()])  # sorted label keys: [0, 1, ..., 12]
        self.label_to_idx = {l: i for i, l in enumerate(self.label_values)}  # label value -> index (identity mapping here)
        self.ignored_labels = np.array([])  # classes excluded from training/evaluation (edit here to drop classes)
        self.all_files = np.sort(glob.glob(join(self.path, 'original_block_ply', '*.ply')))
        self.val_file_name = ['birmingham_block_1',
                              'birmingham_block_5',
                              'cambridge_block_10',
                              'cambridge_block_7']
        self.test_file_name = ['birmingham_block_2', 'birmingham_block_8',
                               'cambridge_block_15', 'cambridge_block_22',
                               'cambridge_block_16', 'cambridge_block_27']
        self.use_val = True  # whether to use the validation set

        # Initialize containers; every block not listed above belongs to the training split.
        self.num_per_class = np.zeros(self.num_classes)  # shape [13]: number of training points per class
        self.val_proj = []
        self.val_labels = []
        self.test_proj = []  # per test block: index of the nearest sub-sampled point for each original point
        self.test_labels = []  # per test block: labels of the original (full-resolution) points
        self.possibility = {}  # per split, per cloud (4/6/33 clouds for val/test/train): one float64 score per sub-sampled point
        self.min_possibility = {}  # per split, per cloud: the minimum of the scores above
        self.input_trees = {'training': [], 'validation': [], 'test': []}  # KD-trees built on the sub-sampled clouds
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': [], 'test': []}
        self.input_names = {'training': [], 'validation': [], 'test': []}
        self.load_sub_sampled_clouds(cfg.sub_grid_size)  # load the grid-sub-sampled data

        for ignore_label in self.ignored_labels:  # drop ignored classes from the per-class counts
            self.num_per_class = np.delete(self.num_per_class, ignore_label)
    def load_sub_sampled_clouds(self, sub_grid_size):  # load the grid-sub-sampled data
        tree_path = join(self.path, 'grid_{:.3f}'.format(sub_grid_size))
        for i, file_path in enumerate(self.all_files):
            t0 = time.time()
            cloud_name = file_path.split('/')[-1][:-4]
            if cloud_name in self.test_file_name:
                cloud_split = 'test'
            elif cloud_name in self.val_file_name:
                cloud_split = 'validation'
            else:
                cloud_split = 'training'

            # Names of the input files
            kd_tree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))
            data = read_ply(sub_ply_file)
            sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
            sub_labels = data['class']

            # Accumulate num_per_class over the training set
            if cloud_split == 'training':
                self.num_per_class += DP.get_num_class_from_label(sub_labels, self.num_classes)

            # Read the pickled KD-tree built on the sub-sampled cloud
            with open(kd_tree_file, 'rb') as f:
                search_tree = pickle.load(f)

            # Append tree / colors / labels / name to the per-split lists
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            self.input_labels[cloud_split] += [sub_labels]
            self.input_names[cloud_split] += [cloud_name]

            size = sub_colors.shape[0] * 4 * 7  # rough size estimate: N points x 7 values (xyz + rgb + label) x 4 bytes
            print('{:s} {:.1f} MB loaded in {:.1f}s'.format(kd_tree_file.split('/')[-1], size * 1e-6, time.time() - t0))

        print('\nPreparing reprojected indices for testing')
        # Get validation and test reprojection indices
        for i, file_path in enumerate(self.all_files):
            t0 = time.time()
            cloud_name = file_path.split('/')[-1][:-4]
            # Validation projection indices and labels
            if cloud_name in self.val_file_name:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                with open(proj_file, 'rb') as f:
                    proj_idx, labels = pickle.load(f)  # pickle handles (de)serialization of the index/label arrays
                self.val_proj += [proj_idx]
                self.val_labels += [labels]
                print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))
            # Test projection indices and labels
            if cloud_name in self.test_file_name:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                with open(proj_file, 'rb') as f:
                    proj_idx, labels = pickle.load(f)
                self.test_proj += [proj_idx]
                self.test_labels += [labels]
                print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))
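
    # Note on the *_proj.pkl files: for each validation/test block they store, for every
    # point of the original full-resolution cloud, the index of its nearest sub-sampled
    # point together with the original labels. Predictions made on the sub-sampled cloud
    # can then be re-projected onto the full cloud at evaluation time.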
    def get_batch_gen(self, split):
        if split == 'training':
            num_per_epoch = cfg.train_steps * cfg.batch_size
        else:  # validation and test use the same step and batch-size settings
            num_per_epoch = cfg.val_steps * cfg.val_batch_size

        # Reset possibility: a tiny random score per sub-sampled point of every cloud
        self.possibility[split] = []
        self.min_possibility[split] = []
        for colors in self.input_colors[split]:
            self.possibility[split] += [np.random.rand(colors.shape[0]) * 1e-3]
            self.min_possibility[split] += [float(np.min(self.possibility[split][-1]))]
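
        # "Possibility" is a visit score per point: every point starts with a tiny random
        # value, and each crop raises the scores of the points it covers. Always picking
        # the globally least-visited point (the argmin calls below) spreads the crops
        # roughly evenly over the clouds instead of sampling them uniformly at random.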
        def spatially_regular_gen():
            # Generator loop
            for i in range(num_per_epoch):
                # Choose the cloud whose least-visited point has the lowest score
                cloud_idx = int(np.argmin(self.min_possibility[split]))
                # Choose the point with the minimum possibility as the query point
                point_ind = np.argmin(self.possibility[split][cloud_idx])
                # Get the points stored in the KD-tree
                points = np.array(self.input_trees[split][cloud_idx].data, copy=False)
                # Center point of the input region
                center_point = points[point_ind, :].reshape(1, -1)
                # Add noise to the center point
                noise = np.random.normal(scale=cfg.noise_init / 10, size=center_point.shape)
                pick_point = center_point + noise.astype(center_point.dtype)

                # Query the cfg.num_points nearest neighbours (or all points for small clouds)
                if len(points) < cfg.num_points:
                    queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=len(points))[1][0]
                else:
                    queried_idx = self.input_trees[split][cloud_idx].query(pick_point, k=cfg.num_points)[1][0]
                queried_idx = DP.shuffle_idx(queried_idx)

                # Collect points, colors and labels; re-center xyz on the pick point
                queried_pc_xyz = points[queried_idx]
                queried_pc_xyz = queried_pc_xyz - pick_point
                queried_pc_colors = self.input_colors[split][cloud_idx][queried_idx]
                queried_pc_labels = self.input_labels[split][cloud_idx][queried_idx]

                # Raise the possibility of the queried points so they are picked less often
                dists = np.sum(np.square((points[queried_idx] - pick_point).astype(np.float32)), axis=1)
                delta = np.square(1 - dists / np.max(dists))
                self.possibility[split][cloud_idx][queried_idx] += delta
                self.min_possibility[split][cloud_idx] = float(np.min(self.possibility[split][cloud_idx]))
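                # delta = (1 - d^2 / max(d^2))^2: the pick point itself gets +1 while points
                # at the edge of the crop get almost nothing, so under-visited regions keep
                # the lowest scores and are selected first by later argmin calls.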
                # Pad crops from clouds smaller than cfg.num_points up to a fixed size
                if len(points) < cfg.num_points:
                    queried_pc_xyz, queried_pc_colors, queried_idx, queried_pc_labels = \
                        DP.data_aug(queried_pc_xyz, queried_pc_colors, queried_pc_labels, queried_idx, cfg.num_points)
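                # (DP.data_aug presumably pads by duplicating existing points, so every
                # yielded crop ends up with exactly cfg.num_points points.)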
                yield (queried_pc_xyz.astype(np.float32),
                       queried_pc_colors.astype(np.float32),
                       queried_pc_labels,
                       queried_idx.astype(np.int32),
                       np.array([cloud_idx], dtype=np.int32))

        gen_func = spatially_regular_gen
        gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32)
        gen_shapes = ([None, 3], [None, 3], [None], [None], [None])
        return gen_func, gen_types, gen_shapes
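    # Each generated element is (xyz, colors, labels, point indices, cloud index); the
    # accompanying (types, shapes) tuples describe these outputs for
    # tf.data.Dataset.from_generator in init_input_pipeline below.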
    @staticmethod
    def get_tf_mapping2():
        def tf_map(batch_xyz, batch_features, batch_labels, batch_pc_idx, batch_cloud_idx):
            batch_features = tf.concat([batch_xyz, batch_features], axis=-1)  # 6-D features: xyz + rgb
            input_points = []
            input_neighbors = []
            input_pools = []
            input_up_samples = []
            for i in range(cfg.num_layers):
                # k nearest neighbours of every point within its own cloud
                neighbour_idx = tf.py_func(DP.knn_search, [batch_xyz, batch_xyz, cfg.k_n], tf.int32)
                # The points were shuffled, so keeping the first 1/r of them is a random sub-sampling
                sub_points = batch_xyz[:, :tf.shape(batch_xyz)[1] // cfg.sub_sampling_ratio[i], :]
                pool_i = neighbour_idx[:, :tf.shape(batch_xyz)[1] // cfg.sub_sampling_ratio[i], :]
                # Nearest sub-sampled point for every point of the current level (for up-sampling)
                up_i = tf.py_func(DP.knn_search, [sub_points, batch_xyz, 1], tf.int32)
                input_points.append(batch_xyz)
                input_neighbors.append(neighbour_idx)
                input_pools.append(pool_i)
                input_up_samples.append(up_i)
                batch_xyz = sub_points
            input_list = input_points + input_neighbors + input_pools + input_up_samples
            input_list += [batch_features, batch_labels, batch_pc_idx, batch_cloud_idx]
            return input_list

        return tf_map
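    # Layout of input_list: num_layers entries each of points, neighbour indices, pooling
    # indices and up-sampling indices, followed by [features, labels, pc_idx, cloud_idx];
    # the labels therefore sit at index 4 * cfg.num_layers + 1 of the flattened inputs.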
    def init_input_pipeline(self):  # build the tf.data input pipeline
        print('Initiating input pipelines')
        cfg.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]

        # Generator functions, output types and shapes per split
        gen_function, gen_types, gen_shapes = self.get_batch_gen('training')
        gen_function_val, _, _ = self.get_batch_gen('validation')
        gen_function_test, _, _ = self.get_batch_gen('test')

        # Datasets built from the generators
        self.train_data = tf.data.Dataset.from_generator(gen_function, gen_types, gen_shapes)
        self.val_data = tf.data.Dataset.from_generator(gen_function_val, gen_types, gen_shapes)
        self.test_data = tf.data.Dataset.from_generator(gen_function_test, gen_types, gen_shapes)

        # Batched datasets
        self.batch_train_data = self.train_data.batch(cfg.batch_size)
        self.batch_val_data = self.val_data.batch(cfg.val_batch_size)
        self.batch_test_data = self.test_data.batch(cfg.val_batch_size)
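        # Batching into dense [B, N, ...] tensors works because every crop carries exactly
        # cfg.num_points points (smaller clouds are padded in the generator above).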
        # Map every batch to the multiscale network inputs
        map_func = self.get_tf_mapping2()
        self.batch_train_data = self.batch_train_data.map(map_func=map_func)  # apply tf_map to each batch
        self.batch_val_data = self.batch_val_data.map(map_func=map_func)
        self.batch_test_data = self.batch_test_data.map(map_func=map_func)

        # Prefetch batches to overlap data preparation with computation
        self.batch_train_data = self.batch_train_data.prefetch(cfg.batch_size)
        self.batch_val_data = self.batch_val_data.prefetch(cfg.val_batch_size)
        self.batch_test_data = self.batch_test_data.prefetch(cfg.val_batch_size)

        # One reinitializable iterator shared by the three datasets
        iterator = tf.data.Iterator.from_structure(self.batch_train_data.output_types, self.batch_train_data.output_shapes)
        self.flat_inputs = iterator.get_next()

        # Initialization ops that bind the iterator to a given split
        self.train_init_op = iterator.make_initializer(self.batch_train_data)
        self.val_init_op = iterator.make_initializer(self.batch_val_data)
        self.test_init_op = iterator.make_initializer(self.batch_test_data)
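
# Note: tf.data.Iterator.from_structure (TF1) creates a single reinitializable iterator;
# running train_init_op / val_init_op / test_init_op rebinds it to the matching split, so
# the same flat_inputs tensors serve training, validation and testing. Sketch of a switch:
#     sess.run(dataset.val_init_op)          # rebind the iterator to the validation set
#     batch = sess.run(dataset.flat_inputs)  # fetch one validation batch
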
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0, help='which GPU to use [default: 0]')
    parser.add_argument('--mode', type=str, default='train', help='options: train, test, vis')
    FLAGS = parser.parse_args()

    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.gpu)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    Mode = FLAGS.mode

    # Clean up artefacts from previous runs
    shutil.rmtree('__pycache__') if exists('__pycache__') else None
    if Mode == 'train':
        shutil.rmtree('results') if exists('results') else None
        shutil.rmtree('train_log') if exists('train_log') else None
        for f in os.listdir(dirname(abspath(__file__))):
            if f.startswith('log_'):
                os.remove(f)
    dataset = SensatUrban()  # build the dataset object
    dataset.init_input_pipeline()  # build the tf.data pipeline and iterator

    if Mode == 'train':
        model = Network(dataset, cfg)
        model.train(dataset)
    elif Mode == 'test':
        cfg.saving = False
        model = Network(dataset, cfg)
        chosen_snapshot = -1
        # Restore the latest snapshot of the latest log folder
        logs = np.sort([os.path.join('results', f) for f in os.listdir('results') if f.startswith('Log')])
        chosen_folder = logs[-1]
        snap_path = join(chosen_folder, 'snapshots')
        snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']
        chosen_step = np.sort(snap_steps)[-1]
        chosen_snap = os.path.join(snap_path, 'snap-{:d}'.format(chosen_step))
        tester = ModelTester(model, dataset, restore_snap=chosen_snap)
        tester.test(model, dataset)
        shutil.rmtree('train_log') if exists('train_log') else None  # rmtree() removes the directory tree recursively
    else:  # 'vis': run the input pipeline without a network and draw the crops
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())  # variables are defined and initialized separately in TF1
            sess.run(dataset.train_init_op)  # bind the iterator to the training split
            while True:
                data_list = sess.run(dataset.flat_inputs)
                xyz = data_list[0]  # full-resolution crop coordinates
                sub_xyz = data_list[1]  # first sub-sampled level
                label = data_list[21]
                Plot.draw_pc_sem_ins(xyz[0, :, :], label[0, :])
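                # Each iteration draws one semantically colored crop until the epoch
                # generator is exhausted and tf.data raises tf.errors.OutOfRangeError.
                # Index 21 assumes cfg.num_layers == 5: labels live at 4 * num_layers + 1
                # in the flattened input list (see the layout note after tf_map above).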