This repository has been archived by the owner on Feb 17, 2022. It is now read-only.

Sourcery Starbot ⭐ refactored YuhaoCheng/PyAnomaly #13

Open
wants to merge 1 commit into base: master
4 changes: 1 addition & 3 deletions demo.py
@@ -71,9 +71,7 @@ def main(args, cfg, logger, cfg_name, time_stamp, is_training):
     logger.info('Finish Using the anomaly detection service')

 def make_result(result_dict, video_path):
-    result_image = None
-
-    return result_image
+    return None
Function make_result refactored with the following changes:


if __name__ == '__main__':
args = parse_args()
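Sourcery's inline-return move, for reference: a minimal sketch with a hypothetical helper (not from PyAnomaly), showing that the temporary adds a name but no information.

```python
# Before: the intermediate variable is returned immediately.
def anomaly_score(frame_losses):
    score = sum(frame_losses) / len(frame_losses)
    return score

# After: return the expression directly.
def anomaly_score(frame_losses):
    return sum(frame_losses) / len(frame_losses)

assert anomaly_score([0.25, 0.75]) == 0.5
```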
3 changes: 1 addition & 2 deletions pyanomaly/core/engine/abstract/abstract_engine.py
@@ -84,8 +84,7 @@ def data_parallel(self, model):
         """
         logger.info('<!_!> ==> Data Parallel')
         gpus = [int(i) for i in self.engine_gpus]
-        model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus)
-        return model_parallel
+        return torch.nn.DataParallel(model.cuda(), device_ids=gpus)
Function AbstractEngine.data_parallel refactored with the following changes:



def _load_file(self, model_keys, model_file):
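For context on what `data_parallel` returns, a minimal `torch.nn.DataParallel` sketch; it assumes a machine with at least two CUDA devices, and the device ids are illustrative (the engine derives them from the config):

```python
import torch

gpus = [0, 1]  # illustrative; the engine reads these from self.engine_gpus
model = torch.nn.Linear(128, 10)

# DataParallel replicates the module on each listed device and splits the
# input's batch dimension across them on every forward pass.
model = torch.nn.DataParallel(model.cuda(), device_ids=gpus)

out = model(torch.randn(4, 128).cuda())  # results are gathered on gpus[0]
print(out.shape)  # torch.Size([4, 10])
```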
22 changes: 10 additions & 12 deletions pyanomaly/core/engine/abstract/base_engine.py
@@ -200,16 +200,15 @@ def fine_tune(self):
         for n, p in self.model.named_parameters():
             parts = n.split('.')
             # consider the data parallel situation
-            if parts[0] == 'module':
-                if parts[1] not in layer_list:
-                    p.requires_grad = False
-                if p.requires_grad:
-                    print(n)
-            else:
-                if parts[0] not in layer_list:
-                    p.requires_grad = False
-                if p.requires_grad:
-                    print(n)
+            if (
+                parts[0] == 'module'
+                and parts[1] not in layer_list
+                or parts[0] != 'module'
+                and parts[0] not in layer_list
+            ):
+                p.requires_grad = False
+            if p.requires_grad:
+                print(n)
Function BaseTrainer.fine_tune refactored with the following changes:

self.logger.info('Finish Setting freeze layers')
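The merged condition leans on Python's precedence (`and` binds tighter than `or`), so it evaluates as two parenthesized cases. A standalone equivalence check, with illustrative names:

```python
def is_frozen(parts, layer_list):
    # Explicit parentheses spell out what the merged condition means:
    return (parts[0] == 'module' and parts[1] not in layer_list) or (
        parts[0] != 'module' and parts[0] not in layer_list
    )

# DataParallel-prefixed parameters are judged by parts[1], plain ones by parts[0].
assert is_frozen(['module', 'fc', 'weight'], ['conv']) is True
assert is_frozen(['module', 'conv', 'weight'], ['conv']) is False
assert is_frozen(['fc', 'weight'], ['conv']) is True
assert is_frozen(['conv', 'weight'], ['conv']) is False
```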

     def data_parallel(self, model):
@@ -222,8 +221,7 @@
         """
         logger.info('<!_!> ==> Data Parallel')
         gpus = [int(i) for i in self.config.SYSTEM.gpus]
-        model_parallel = torch.nn.DataParallel(model.cuda(), device_ids=gpus)
-        return model_parallel
+        return torch.nn.DataParallel(model.cuda(), device_ids=gpus)
Function BaseTrainer.data_parallel refactored with the following changes:



def after_step(self, current_step):
6 changes: 1 addition & 5 deletions pyanomaly/core/engine/engine_api.py
@@ -23,11 +23,7 @@ def __init__(self, cfg, is_training):
         self.cfg = cfg
         self.model_name = self.cfg.MODEL.name
         self.is_training = is_training
-        if self.is_training:
-            self.phase = 'TRAIN'
-        else:
-            self.phase = 'VAL'
-
+        self.phase = 'TRAIN' if self.is_training else 'VAL'
Function EngineAPI.__init__ refactored with the following changes:

self.engine_name = self.cfg.get(self.phase)['engine_name']

def build(self):
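The conditional expression is a direct, behavior-preserving rewrite of the four-line `if`/`else`:

```python
is_training = True
assert ('TRAIN' if is_training else 'VAL') == 'TRAIN'

is_training = False
assert ('TRAIN' if is_training else 'VAL') == 'VAL'
```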
4 changes: 1 addition & 3 deletions pyanomaly/core/engine/functions/amc.py
@@ -163,13 +163,11 @@ def custom_setup(self):
         self.wf = 1.0
         self.wi = 1.0
         self.threshold = 0.0 # the threshold to judge whether the frame is the anomaly
-        pass
Function AMCService.custom_setup refactored with the following changes:


     def get_clip_by_stride(self, video, stride=2):
         """Get the clip list by the stride
         """
-        clip_list = []
-        return clip_list
+        return []
Function AMCService.get_clip_by_stride refactored with the following changes:


def execute(self, data):
output_dict = OrderedDict()
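`get_clip_by_stride` remains a stub that returns an empty list. Purely to illustrate its documented intent, a stride-based clip split might look like this (hypothetical sketch, not the repository's implementation):

```python
def get_clip_by_stride(video, clip_len=2, stride=2):
    """Split a frame sequence into clips of clip_len frames, spaced stride apart."""
    return [video[i:i + clip_len] for i in range(0, len(video) - clip_len + 1, stride)]

assert get_clip_by_stride(list(range(6))) == [[0, 1], [2, 3], [4, 5]]
```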
4 changes: 1 addition & 3 deletions pyanomaly/core/engine/functions/ma.py
@@ -172,13 +172,11 @@ def custom_setup(self):
         self.wf = 1.0
         self.wi = 1.0
         self.threshold = 0.0 # the threshold to judge whether the frame is the anomaly
-        pass
Function AMCService.custom_setup refactored with the following changes:


     def get_clip_by_stride(self, video, stride=2):
         """Get the clip list by the stride
         """
-        clip_list = []
-        return clip_list
+        return []
Function AMCService.get_clip_by_stride refactored with the following changes:


def execute(self, data):
output_dict = OrderedDict()
4 changes: 0 additions & 4 deletions pyanomaly/core/hook/abstract/abstract_hook.py
@@ -74,10 +74,6 @@ def after_step(self, current_step):
                 # save the checkpoint
                 self.engine.save(current_step)
                 self.engine.logger.info('LOL==>the accuracy is not imporved in epcoh{} but save'.format(current_step))
-            else:
-                pass
-        else:
-            pass
Function EvaluateHook.after_step refactored with the following changes:


def inference(self):
acc = self.evaluate(0)
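The deleted branches were pure no-ops; an `else: pass` never changes control flow, so removing it is safe, e.g.:

```python
def check(improved):
    if improved:
        return 'save'
    # An `else: pass` here would add nothing: falling through is identical.
    return 'skip'

assert check(True) == 'save'
assert check(False) == 'skip'
```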
183 changes: 94 additions & 89 deletions pyanomaly/core/hook/functions/ocae_hooks.py
@@ -37,97 +37,102 @@
 class ClusterHook(HookBase):
     def after_step(self, current_step):
         # import ipdb; ipdb.set_trace()
-        if current_step % self.engine.config.TRAIN.eval_step == 0 and current_step!= 0:
+        if (
+            current_step % self.engine.config.TRAIN.eval_step != 0
+            or current_step == 0
+        ):
+            return
+
+        self.engine.logger.info('Start clsuter the feature')
+        frame_num = self.engine.config.DATASET.train.clip_length
+        frame_step = self.engine.config.DATASET.train.clip_step
+        feature_record = []
+        for video_name in self.engine.cluster_dataset_keys:
+            dataset = self.engine.cluster_dataset_dict[video_name]
+            data_loader = DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=1)
+            # import ipdb; ipdb.set_trace()
+            for test_input, anno, meta in data_loader:
+                future = data[:, :, 2, :, :].cuda() # t+1 frame
+                current = data[:, :, 1, :, :].cuda() # t frame
+                past = data[:, :, 0, :, :].cuda() # t frame
+                bboxs = get_batch_dets(self.engine.Detector, current)
+                for index, bbox in enumerate(bboxs):
+                    # import ipdb; ipdb.set_trace()
+                    if bbox.numel() == 0:
+                        # import ipdb; ipdb.set_trace()
+                        # bbox = torch.zeros([1,4])
+                        bbox = bbox.new_zeros([1,4])
+                        # print('NO objects')
+                        # continue
+                    # import ipdb; ipdb.set_trace()
+                    current_object, _ = multi_obj_grid_crop(current[index], bbox)
+                    future_object, _ = multi_obj_grid_crop(future[index], bbox)
+                    future2current = torch.stack([future_object, current_object], dim=1)
+
+                    past_object, _ = multi_obj_grid_crop(past[index], bbox)
+                    current2past = torch.stack([current_object, past_object], dim=1)
+
+                    _, _, A_input = frame_gradient(future2current)
+                    A_input = A_input.sum(1)
+                    _, _, C_input = frame_gradient(current2past)
+                    C_input = C_input.sum(1)
+                    A_feature, _, _ = self.engine.A(A_input)
+                    B_feature, _, _ = self.engine.B(current_object)
+                    C_feature, _, _ = self.engine.C(C_input)
+
+                    A_flatten_feature = A_feature.flatten(start_dim=1)
+                    B_flatten_feature = B_feature.flatten(start_dim=1)
+                    C_flatten_feature = C_feature.flatten(start_dim=1)
+                    ABC_feature = torch.cat([A_flatten_feature, B_flatten_feature, C_flatten_feature], dim=1).detach()
+                    # import ipdb; ipdb.set_trace()
+                    ABC_feature_s = torch.chunk(ABC_feature, ABC_feature.size(0), dim=0)
+                    # feature_record.extend(ABC_feature_s)
+                    for abc_f in ABC_feature_s:
+                        temp = abc_f.squeeze(0).cpu().numpy()
+                        feature_record.append(temp)
+                    # import ipdb; ipdb.set_trace()
+            self.engine.logger.info(f'Finish the video:{video_name}')
+        self.engine.logger.info(f'Finish extract feature, the sample:{len(feature_record)}')
+        device = torch.device('cuda:0')
+        cluster_input = torch.from_numpy(np.array(feature_record))
+        # cluster_input = np.array(feature_record)
+        time = mmcv.Timer()
+        # import ipdb; ipdb.set_trace()
+        cluster_centers = cluster_input.new_zeros(size=[self.engine.config.TRAIN.cluster.k, 3072])
+        cluster_score = 0.0
+        cluster_model = None
+        for _ in range(1):
+            # model = KMeans(n_clusters=self.trainer.config.TRAIN.cluster.k, init='k-means++',n_init=10, algorithm='full',max_iter=300).fit(cluster_input)
+            # labels = model.labels_
+            # temp = calinski_harabaz_score(cluster_input, labels)
+            # if temp > cluster_score:
+            # cluster_model = model
+            # print(f'the temp score is {temp}')
+            cluster_ids_x, cluster_center = kmeans(X=cluster_input, num_clusters=self.engine.config.TRAIN.cluster.k, distance='euclidean', device=device)
+            cluster_centers += cluster_center
+        # import ipdb; ipdb.set_trace()
+        # cluster_centers = cluster_centers / 10
+        # model.fit(cluster_input)
+        # pusedo_labels = model.predict(cluster_input)
+        pusedo_labels = kmeans_predict(cluster_input, cluster_centers, 'euclidean', device=device).detach().cpu().numpy()
+        # pusedo_labels = cluster_model.labels_
+        print(f'The cluster time is :{time.since_start()/60} min')
+        # import ipdb; ipdb.set_trace()
+        # pusedo_labels = np.split(pusedo_labels, pusedo_labels.shape[0], 0)
+
+        pusedo_dataset = os.path.join(self.engine.config.TRAIN.pusedo_data_path, 'pusedo')
+        if not os.path.exists(pusedo_dataset):
+            os.mkdir(pusedo_dataset)
+
+        np.savez_compressed(os.path.join(pusedo_dataset, f'{self.engine.config.DATASET.name}_dummy.npz'), data=cluster_input, label=pusedo_labels)
+        print(f'The save time is {time.since_last_check() / 60} min')
+        # binary_labels = MultiLabelBinarizer().fit_transform(pusedo_labels)
+        # self.trainer.ovr_model = OneVsRestClassifier(LinearSVC(random_state = 0)).fit(cluster_input,binary_labels)
+        # self.trainer.ovr_model = OneVsRestClassifier(LinearSVC(random_state = 0), n_jobs=16).fit(cluster_input, pusedo_labels)
+        self.engine.ovr_model = self.engine.ovr_model.fit(cluster_input, pusedo_labels)
+        # self.trainer.saved_model['OVR'] = self.trainer.ovr_model
+        print(f'The train ovr: {time.since_last_check() / 60} min')
+        joblib.dump(self.engine.ovr_model, self.engine.ovr_model_path)
Function ClusterHook.after_step refactored with the following changes:

# import ipdb; ipdb.set_trace()

@HOOK_REGISTRY.register()
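The rewrite is the usual guard-clause move: invert the condition, return early, and dedent the body one level. A minimal sketch of the pattern (illustrative `eval_step`):

```python
# Before: the whole body hangs under one big `if`.
def after_step_nested(step, eval_step=5):
    if step % eval_step == 0 and step != 0:
        return f'evaluate at {step}'

# After: bail out early so the main path stays flat.
def after_step_guarded(step, eval_step=5):
    if step % eval_step != 0 or step == 0:
        return None
    return f'evaluate at {step}'

for s in range(12):
    assert after_step_nested(s) == after_step_guarded(s)
```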
10 changes: 4 additions & 6 deletions pyanomaly/core/optimizer/optimizer_api.py
@@ -46,7 +46,7 @@ def _build_one_optimizer(self, model):
         return t

     def _build_multi_optimizers(self, model_list):
-        param_groups = list()
+        param_groups = []
Function OptimizerAPI._build_multi_optimizers refactored with the following changes:


         if self.type not in OptimizerAPI._SUPPROT:
             raise Exception(f'Not support: {self.type} in {OptimizerAPI._NAME}')
@@ -58,7 +58,7 @@ def _build_multi_optimizers(self, model_list):
         for model in model_list:
             param_groups.append({'params':model.parameters()})
         t = torch.optim.SGD(model.parameters(), lr=self.lr, momentum=self.params.momentum, weight_decay=self.params.weight_decay,nesterov=self.params.nesterov)

         return t

     def _build(self, model):
@@ -81,9 +81,7 @@ def __call__(self, model):

         if mode == OptimizerAPI._MODE[0]:
             optimizer_name = 'optimizer_'+''.join(include_parts)
-            model_combination = []
-            for temp in include_parts:
-                model_combination.append(model[temp])
+            model_combination = [model[temp] for temp in include_parts]
Function OptimizerAPI.__call__ refactored with the following changes:

             optimizer_value = self._build(model_combination)
             optimizer_dict.update({optimizer_name:optimizer_value})
         elif mode == OptimizerAPI._MODE[1]:
@@ -94,6 +92,6 @@ def __call__(self, model):
             optimizer_dict.update({optimizer_name:optimizer_value})
         else:
             raise Exception(f'Not support the optimizer mode, only support {OptimizerAPI._MODE}')

         return optimizer_dict
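The comprehension builds exactly the list the loop did; a sketch with a stand-in model dict:

```python
model = {'A': 'netA', 'B': 'netB', 'C': 'netC'}  # stand-in for the model mapping
include_parts = ['A', 'C']

loop_combination = []
for temp in include_parts:
    loop_combination.append(model[temp])

# The comprehension form is equivalent and shorter.
assert [model[temp] for temp in include_parts] == loop_combination == ['netA', 'netC']
```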

13 changes: 6 additions & 7 deletions pyanomaly/core/other/kmeans.py
@@ -18,8 +18,7 @@ def initialize(X, num_clusters):
     """
     num_samples = len(X)
     indices = np.random.choice(num_samples, num_clusters, replace=False)
-    initial_state = X[indices]
-    return initial_state
+    return X[indices]
Function initialize refactored with the following changes:



 def kmeans(
@@ -41,10 +40,10 @@
     """
     print(f'running k-means on {device}..')

-    if distance == 'euclidean':
-        pairwise_distance_function = pairwise_distance
-    elif distance == 'cosine':
+    if distance == 'cosine':
         pairwise_distance_function = pairwise_cosine
Function kmeans refactored with the following changes:

  • Simplify conditional into switch-like form (switch)
  • Replace assignment with augmented assignment (aug-assign)

+    elif distance == 'euclidean':
+        pairwise_distance_function = pairwise_distance
     else:
         raise NotImplementedError

@@ -65,7 +64,7 @@
     choice_points = torch.argmin(dis, dim=0)
     initial_state = X[choice_points]
     initial_state = initial_state.to(device)

     iteration = 0
# tqdm_meter = tqdm(desc='[running kmeans]')
while True:
@@ -89,7 +88,7 @@
         ))

         # increment iteration
-        iteration = iteration + 1
+        iteration += 1

# update tqdm meter
# tqdm_meter.set_postfix(
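The reordered `if`/`elif` keeps the same string-to-kernel mapping. A dict dispatch is another common way to express this kind of switch (sketch; the stubs stand in for the module's real pairwise kernels):

```python
def pairwise_distance(x, y):
    """Stub standing in for the euclidean kernel in kmeans.py."""

def pairwise_cosine(x, y):
    """Stub standing in for the cosine kernel in kmeans.py."""

# One lookup replaces the if/elif chain; unknown names still raise.
_DISTANCE_FNS = {'euclidean': pairwise_distance, 'cosine': pairwise_cosine}

def pick_distance(name):
    try:
        return _DISTANCE_FNS[name]
    except KeyError:
        raise NotImplementedError(f'unsupported distance: {name}')

assert pick_distance('cosine') is pairwise_cosine
```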
2 changes: 1 addition & 1 deletion pyanomaly/core/scheduler/schedulers.py
@@ -26,7 +26,7 @@ def __init__(
         warmup_method: str = "linear",
         last_epoch: int = -1,
     ):
-        if not list(milestones) == sorted(milestones):
+        if list(milestones) != sorted(milestones):
Function WarmupMultiStepLR.__init__ refactored with the following changes:

  • Simplify logical expression using De Morgan identities (de-morgan)

             raise ValueError(
                 "Milestones should be a list of" " increasing integers. Got {}", milestones
             )
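By De Morgan, `not (a == b)` is `a != b`, so the two guards are interchangeable for lists, whose `__eq__` and `__ne__` are consistent:

```python
for milestones in ([10, 20, 30], [20, 10, 30]):
    old_guard = not list(milestones) == sorted(milestones)
    new_guard = list(milestones) != sorted(milestones)
    assert old_guard == new_guard
```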