Alphapose #113

Open

wants to merge 13 commits into base: master
add alphapose wrappers
alpha-carinae29 committed Dec 20, 2020
commit 1ce8f1610acb792caeb28565dbacec00515a03f1
5 changes: 3 additions & 2 deletions libs/detectors/x86/alphapose/alphapose.py
@@ -15,6 +15,7 @@ class Detector:
     def __init__(self, config):
         self.config = config
         self.name = config.get_section_dict('Detector')['Name']
+        self.w, self.h, _ = [int(i) for i in self.config.get_section_dict('Detector')['ImageSize'].split(',')]
         self.cfg = config_parser.parse("configs/config.yaml")
         self.device = torch.device("cuda" if config.get_section_dict('Detector')['Gpu'] else "cpu")
         self._input_size = self.cfg.DATA_PRESET.IMAGE_SIZE
@@ -40,14 +41,14 @@ def load_model(self):
     def inference(self, image):
         detections = self.detection_model.inference(image)
         # TODO
-        detections = prepare_detection_results(detections)
+        detections = prepare_detection_results(detections, self.w, self.h)
         with torch.no_grad():
             inps, cropped_boxes, boxes, scores, ids = self.transform_detections(image, detections)
             inps = inps.to(self.device)
             hm = self.pose_model(inps)
             poses = self.post_process(hm, cropped_boxes, boxes, scores, ids)
         # TODO
-        results = prepare_poses_results(poses)
+        results = prepare_poses_results(poses, self.w, self.h, scores)
         return results

     def transform_detections(self, image, dets):
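For context, the new ImageSize parsing in __init__ expects the Detector section of the config to provide ImageSize as a comma-separated width,height,channels string. A minimal sketch of the parsing, assuming an INI-style config file whose key names come from the diff but whose concrete values are hypothetical:

    # Assuming a config section like:
    #   [Detector]
    #   Name = alphapose
    #   ImageSize = 640,480,3
    #   Gpu = true
    # the added line in __init__ splits the string into frame width and height
    # and discards the trailing channel count:
    w, h, _ = [int(i) for i in "640,480,3".split(",")]
    print(w, h)  # 640 480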
49 changes: 45 additions & 4 deletions libs/detectors/x86/alphapose/wrappers.py
@@ -1,6 +1,47 @@
-def prepare_detection_results(detections):
-    pass
+import numpy as np
+import torch
 
 
-def prepare_poses_results(poses):
-    pass
+def prepare_detection_results(object_list, w, h):
+    # Convert normalized [y1, x1, y2, x2] detections into the 8-column,
+    # pixel-space tensor that the AlphaPose pose estimator consumes
+    # (columns 1:5 hold the box, column 5 the score).
+    scale_factors = torch.tensor([w, h, w, h])
+    num_of_objects = len(object_list)
+    output = torch.zeros(num_of_objects, 8, dtype=torch.float32)
+    # Column 6 is the class confidence; hard-coded since only persons are detected.
+    output[:, 6] = 0.99
+    for i, obj in enumerate(object_list):
+        # Reorder [y1, x1, y2, x2] -> [x1, y1, x2, y2] and scale to pixels.
+        bbox = torch.tensor([obj["bbox"][1], obj["bbox"][0], obj["bbox"][3], obj["bbox"][2]])
+        bbox_scaled = bbox * scale_factors
+        output[i, 1:5] = bbox_scaled
+        # Clip x coordinates to [0, w] and y coordinates to [0, h].
+        output[i, [1, 3]] = torch.clamp(output[i, [1, 3]], 0.0, w)
+        output[i, [2, 4]] = torch.clamp(output[i, [2, 4]], 0.0, h)
+        output[i, 5] = obj["score"]
+
+    return output
+
+
+def prepare_poses_results(poses, w, h, scores):
+    # Map AlphaPose outputs back to normalized [y1, x1, y2, x2] boxes and,
+    # when the keypoints allow it, estimate a face bounding box.
+    scales = np.array([h, w, h, w])
+    results = []
+    for i, item in enumerate(poses):
+        object_dict = dict()
+        # Reorder the pixel-space [x1, y1, x2, y2] bbox to [y1, x1, y2, x2] and normalize.
+        bboxes = np.array([item["bbox"][1], item["bbox"][0], item["bbox"][3], item["bbox"][2]])
+        bboxes_scaled = np.divide(bboxes, scales)
+        object_dict["id"] = "1-" + str(i)
+        object_dict["bbox"] = bboxes_scaled.tolist()
+        object_dict["score"] = scores[i].item()
+        kp_scores = item["kp_score"].numpy()
+        keypoints = item["keypoints"]
+        # Only estimate a face box when the nose, both eyes, and both shoulders
+        # (COCO keypoints 0, 1, 2, 5, 6) are confident.
+        if np.all(kp_scores[[0, 1, 2, 5, 6]] > 0.15):
+            x_min_face = int(keypoints[6, 0])
+            x_max_face = int(keypoints[5, 0])
+            y_max_face = int((keypoints[5, 1] + keypoints[6, 1]) / 2)
+            y_eyes = int((keypoints[1, 1] + keypoints[2, 1]) / 2)
+            # Reflect the shoulder line about the eye line to approximate the top of the head.
+            y_min_face = 2 * y_eyes - y_max_face
+            if (y_max_face - y_min_face > 0) and (x_max_face - x_min_face > 0):
+                # Expand the crop by 10% per side and clip it to the image bounds.
+                h_crop = y_max_face - y_min_face
+                x_min_face = int(max(0, x_min_face - 0.1 * h_crop))
+                y_min_face = int(max(0, y_min_face - 0.1 * h_crop))
+                x_max_face = int(min(w, x_min_face + 1.1 * h_crop))
+                y_max_face = int(min(h, y_min_face + 1.1 * h_crop))
+                object_dict["face"] = [y_min_face / h, x_min_face / w, y_max_face / h, x_max_face / w]
+        results.append(object_dict)
+    return results
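For reference, a minimal sketch of how the two wrappers round-trip, in the spirit of Detector.inference above. The sample inputs are hypothetical: the detector side is assumed to emit normalized [y1, x1, y2, x2] boxes, and the pose dict mimics AlphaPose's post-processing output:

    import torch
    from libs.detectors.x86.alphapose.wrappers import (
        prepare_detection_results,
        prepare_poses_results,
    )

    w, h = 640, 480
    # One detection: normalized [y1, x1, y2, x2] plus a confidence score.
    object_list = [{"bbox": [0.1, 0.2, 0.6, 0.6], "score": 0.9}]

    dets = prepare_detection_results(object_list, w, h)
    print(dets.shape)  # torch.Size([1, 8]); columns 1:5 hold the pixel-space box

    # A pose shaped like AlphaPose's output: pixel-space bbox,
    # 17 COCO keypoints, and per-keypoint confidence scores.
    poses = [{"bbox": [128.0, 48.0, 384.0, 288.0],
              "keypoints": torch.zeros(17, 2),
              "kp_score": torch.zeros(17, 1)}]
    results = prepare_poses_results(poses, w, h, scores=dets[:, 5])
    print(results[0]["bbox"])  # back to normalized [0.1, 0.2, 0.6, 0.6]

Because the dummy keypoint scores are all zero, no face box is produced here; the face key is set only when the nose, eye, and shoulder keypoints all clear the 0.15 threshold.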