diff --git a/autonav_ws/src/autonav_display/src/display.py b/autonav_ws/src/autonav_display/src/display.py
index bc319140..5ede35a5 100644
--- a/autonav_ws/src/autonav_display/src/display.py
+++ b/autonav_ws/src/autonav_display/src/display.py
@@ -95,7 +95,7 @@ def __init__(self):
         self.filteredSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/image/left", self.filteredCallbackLeft, 20)
         self.filteredSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/image/right", self.filteredCallbackRight, 20)
         self.bigboiSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/debug", self.bigboiCallback, 20)
-        self.debugAStarSubscriber = self.create_subscription(CompressedImage, "/autonav/debug/astar/image", self.debugAStarCallback, 20)
+        self.debugAStarSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/expanded/image", self.debugAStarCallback, 20)
 
         self.get_logger().info("Starting event loop")
diff --git a/autonav_ws/src/autonav_vision/src/combination.py b/autonav_ws/src/autonav_vision/src/combination.py
index 2d8255ce..527f256a 100644
--- a/autonav_ws/src/autonav_vision/src/combination.py
+++ b/autonav_ws/src/autonav_vision/src/combination.py
@@ -17,7 +17,7 @@
 
 g_bridge = CvBridge()
 g_mapData = MapMetaData()
-g_mapData.width = 200
+g_mapData.width = 100
 g_mapData.height = 100
 g_mapData.resolution = 0.1
 g_mapData.origin = Pose()
@@ -56,6 +56,60 @@ def image_received_right(self, msg):
         self.image_right = msg
         self.try_combine_images()
 
+    def order_points(self, pts):
+        # initialize a list of coordinates that will be ordered
+        # such that the first entry in the list is the top-left,
+        # the second entry is the top-right, the third is the
+        # bottom-right, and the fourth is the bottom-left
+        rect = np.zeros((4, 2), dtype = "float32")
+        # the top-left point will have the smallest sum, whereas
+        # the bottom-right point will have the largest sum
+        s = pts.sum(axis = 1)
+        rect[0] = pts[np.argmin(s)]
+        rect[2] = pts[np.argmax(s)]
+        # now, compute the difference between the points; the
+        # top-right point will have the smallest difference,
+        # whereas the bottom-left will have the largest difference
+        diff = np.diff(pts, axis = 1)
+        rect[1] = pts[np.argmin(diff)]
+        rect[3] = pts[np.argmax(diff)]
+        # return the ordered coordinates
+        return rect
+
+
+    def four_point_transform(self, image, pts):
+        # obtain a consistent order of the points and unpack them
+        # individually
+        rect = self.order_points(pts)
+        (tl, tr, br, bl) = rect
+        # compute the width of the new image, which will be the
+        # maximum distance between the bottom-right and bottom-left
+        # x-coordinates or the top-right and top-left x-coordinates
+        widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
+        widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
+        maxWidth = max(int(widthA), int(widthB))
+        # compute the height of the new image, which will be the
+        # maximum distance between the top-right and bottom-right
+        # y-coordinates or the top-left and bottom-left y-coordinates
+        heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
+        heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
+        maxHeight = max(int(heightA), int(heightB))
+        # now that we have the dimensions of the new image, construct
+        # the set of destination points to obtain a "bird's eye view"
+        # (i.e. top-down view) of the image, again specifying points
+        # in the top-left, top-right, bottom-right, and bottom-left
+        # order
+        dst = np.array([
+            [0, 0],
+            [maxWidth - 1, 0],
+            [maxWidth - 1, maxHeight - 1],
+            [0, maxHeight - 1]], dtype="float32")
+        # compute the perspective transform matrix and then apply it
+        M = cv2.getPerspectiveTransform(rect, dst)
+        warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
+        # return the warped image
+        return warped
+
     def try_combine_images(self):
         if self.image_left is None or self.image_right is None:
             return
@@ -65,7 +119,9 @@
         cv2img_left = g_bridge.compressed_imgmsg_to_cv2(self.image_left)
         cv2img_right = g_bridge.compressed_imgmsg_to_cv2(self.image_right)
         combined = cv2.hconcat([cv2img_left, cv2img_right])
-        datamap = cv2.resize(combined, dsize=(self.config.map_res * 2, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
+        pts = [(147, 287), (851, 232), (955, 678), (0, 678)]
+        combined = self.four_point_transform(combined, np.array(pts))
+        datamap = cv2.resize(combined, dsize=(self.config.map_res, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
         flat = list(datamap.flatten().astype(int))
         msg = OccupancyGrid(info=g_mapData, data=flat)
diff --git a/autonav_ws/src/autonav_vision/src/expandify.cpp b/autonav_ws/src/autonav_vision/src/expandify.cpp
index 61f85883..89456876 100644
--- a/autonav_ws/src/autonav_vision/src/expandify.cpp
+++ b/autonav_ws/src/autonav_vision/src/expandify.cpp
@@ -95,10 +95,10 @@ class ExpandifyNode : public SCR::Node
             return;
         }
 
-        std::vector<int8_t> cfg_space = std::vector<int8_t>((config.map_res * 2) * config.map_res);
+        std::vector<int8_t> cfg_space = std::vector<int8_t>((config.map_res) * config.map_res);
         std::fill(cfg_space.begin(), cfg_space.end(), 0);
 
-        for (int x = 0; x < config.map_res * 2; x++)
+        for (int x = 0; x < config.map_res; x++)
         {
             for (int y = 1; y < config.map_res; y++)
             {
@@ -107,7 +107,7 @@ class ExpandifyNode : public SCR::Node
                 for (Circle &circle : circles)
                 {
                     auto idx = (x + circle.x) + config.map_res * (y + circle.y);
-                    auto expr_x = (x + circle.x) < config.map_res * 2 && (x + circle.x) >= 0;
+                    auto expr_x = (x + circle.x) < config.map_res && (x + circle.x) >= 0;
                     auto expr_y = (y + circle.y) < config.map_res && (y + circle.y) >= 0;
                     if (expr_x && expr_y)
                     {
@@ -151,8 +151,8 @@ class ExpandifyNode : public SCR::Node
 
     nav_msgs::msg::MapMetaData map;
 
-    float maxRange = 0.65;
-    float noGoPercent = 0.70;
+    float maxRange = 0.10;
+    float noGoPercent = 0.38;
     int noGoRange = 0;
     std::vector<Circle> circles;
     ExpandifyConfig config;
diff --git a/autonav_ws/src/autonav_vision/src/transformations.py b/autonav_ws/src/autonav_vision/src/transformations.py
index 508ffeaf..19cf8d77 100644
--- a/autonav_ws/src/autonav_vision/src/transformations.py
+++ b/autonav_ws/src/autonav_vision/src/transformations.py
@@ -17,7 +17,7 @@
 
 g_bridge = CvBridge()
 g_mapData = MapMetaData()
-g_mapData.width = 200
+g_mapData.width = 100
 g_mapData.height = 100
 g_mapData.resolution = 0.1
 g_mapData.origin = Pose()
@@ -37,6 +37,8 @@ def __init__(self):
         self.blur_iterations = 3
         self.rod_offset = 130
         self.map_res = 80
+        self.image_warp_tl = 0.26
+        self.image_warp_tr = 0.26
 
 
 class ImageTransformer(Node):
@@ -71,19 +73,6 @@ def regionOfDisinterest(self, img, vertices):
         masked_image = cv2.bitwise_and(img, mask)
         return masked_image
 
-    def flattenImage(self, img):
-        top_left = (int)(img.shape[1] * 0.26), (int)(img.shape[0])
-        top_right = (int)(img.shape[1] - img.shape[1] * 0.26), (int)(img.shape[0])
-        bottom_left = 0, 0
-        bottom_right = (int)(img.shape[1]), 0
-
-        src_pts = np.float32([[top_left], [top_right], [bottom_left], [bottom_right]])
-        dest_pts = np.float32([[0, 640], [480, 640], [0, 0], [480, 0]])
-
-        matrix = cv2.getPerspectiveTransform(dest_pts, src_pts)
-        output = cv2.warpPerspective(img, matrix, (480, 640))
-        return output
-
     def publishOccupancyMap(self, img):
         datamap = cv2.resize(img, dsize=(self.config.map_res, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
         flat = list(datamap.flatten().astype(int))
@@ -111,27 +100,26 @@ def onImageReceived(self, image = CompressedImage):
         if self.dir == "left":
             region_of_disinterest_vertices = [
                 (width, height),
-                (width, height / 2),
+                (width, height / 1.8),
                 (0, height)
             ]
         else:
             region_of_disinterest_vertices = [
                 (0, height),
-                (0, height / 2),
+                (0, height / 1.8),
                 (width, height)
             ]
 
         # Apply region of disinterest and flattening
         mask = self.regionOfDisinterest(mask, np.array([region_of_disinterest_vertices], np.int32))
         mask[mask < 250] = 0
-        mask = self.flattenImage(mask)
 
         # Actually generate the map
         self.publishOccupancyMap(mask)
 
         # Preview the image
         preview_image = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
-        cv2.polylines(preview_image, np.array([region_of_disinterest_vertices], np.int32), True, (0, 255, 0), 2)
+        # cv2.polylines(preview_image, np.array([region_of_disinterest_vertices], np.int32), True, (0, 255, 0), 2)
         preview_msg = g_bridge.cv2_to_compressed_imgmsg(preview_image)
         preview_msg.header = image.header
         preview_msg.format = "jpeg"
diff --git a/display/scripts/globals.js b/display/scripts/globals.js
index 810b75f8..1052e765 100644
--- a/display/scripts/globals.js
+++ b/display/scripts/globals.js
@@ -43,7 +43,9 @@ var addressKeys = {
         "blur_weight": "int",
         "blur_iterations": "int",
         "rod_offset": "int",
-        "map_res": "int"
+        "map_res": "int",
+        "image_warp_tl": "float",
+        "image_warp_tr": "float"
     },
 
     "autonav_vision_expandifier": {
diff --git a/display/scripts/main.js b/display/scripts/main.js
index c0c87708..d16495e7 100644
--- a/display/scripts/main.js
+++ b/display/scripts/main.js
@@ -602,7 +602,7 @@ $(document).ready(function () {
         input.classList.add("form-control");
         input.value = data;
         input.onchange = function () {
-            config[device][text] = input.value;
+            config[device][text] = parseFloat(input.value);
             send({
                 op: "configuration",
                 device: device,
@@ -628,7 +628,7 @@ $(document).ready(function () {
         input.classList.add("form-control");
         input.value = data;
         input.onchange = function () {
-            config[device][text] = input.value;
+            config[device][text] = parseInt(input.value);
             send({
                 op: "configuration",
                 device: device,
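
Reviewer note: a quick standalone sanity check (not part of the patch) that the corners hardcoded in try_combine_images land where order_points expects them. The script only restates logic already in the diff; the file name and the expected-order comment are illustrative.

# sanity_check_order_points.py -- illustrative only, mirrors the
# order_points logic added to combination.py in this patch
import numpy as np

def order_points(pts):
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)             # x + y per point
    rect[0] = pts[np.argmin(s)]     # top-left: smallest sum
    rect[2] = pts[np.argmax(s)]     # bottom-right: largest sum
    diff = np.diff(pts, axis=1)     # y - x per point
    rect[1] = pts[np.argmin(diff)]  # top-right: smallest difference
    rect[3] = pts[np.argmax(diff)]  # bottom-left: largest difference
    return rect

# the exact corners hardcoded in try_combine_images
pts = np.array([(147, 287), (851, 232), (955, 678), (0, 678)])
print(order_points(pts))
# expected order: (147, 287) TL, (851, 232) TR, (955, 678) BR, (0, 678) BL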
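
A second check on the resize/flatten path: after this patch both combination.py and transformations.py publish a square map, so the flattened payload only matches the advertised metadata when config.map_res agrees with g_mapData.width and g_mapData.height. A minimal sketch, assuming map_res is configured to 100 at runtime (the config default in transformations.py is still 80):

# occupancy_size_check.py -- illustrative only; the map_res value is assumed
import numpy as np

width, height = 100, 100   # g_mapData.width / g_mapData.height after this patch
map_res = 100              # assumed runtime value of config.map_res

datamap = np.zeros((map_res, map_res)) / 2   # stand-in for the resized mask
flat = list(datamap.flatten().astype(int))   # same flatten as the nodes use

# OccupancyGrid consumers index data[y * width + x], so the payload
# length must equal width * height or the grid will be misread
assert len(flat) == width * height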
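
Finally, the expandify.cpp change: with the map now square, the row stride and the x bound are both config.map_res, so the index formula and the bounds check stay consistent. A Python rendering of the same loop for review, assuming a simple list of (x, y) circle offsets; the write inside the if is a placeholder for whatever cost the real node computes:

# expandify_sketch.py -- Python rendering of the C++ loop, for review only
map_res = 100
circles = [(0, 0), (1, 0), (0, 1), (-1, 0), (0, -1)]  # assumed offsets

cfg_space = [0] * (map_res * map_res)
for x in range(map_res):
    for y in range(1, map_res):
        for cx, cy in circles:
            # row-major index with stride map_res, matching the C++ idx
            idx = (x + cx) + map_res * (y + cy)
            in_x = 0 <= x + cx < map_res   # bound fixed from map_res * 2
            in_y = 0 <= y + cy < map_res
            if in_x and in_y:
                cfg_space[idx] = max(cfg_space[idx], 1)  # placeholder write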