Add image_warp_tl and image_warp_tr to globals.js and transform.py
dylanzemlin committed Mar 29, 2024
1 parent 03bcb64 commit 8dfd84b
Showing 6 changed files with 75 additions and 29 deletions.
2 changes: 1 addition & 1 deletion autonav_ws/src/autonav_display/src/display.py
@@ -95,7 +95,7 @@ def __init__(self):
self.filteredSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/image/left", self.filteredCallbackLeft, 20)
self.filteredSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/image/right", self.filteredCallbackRight, 20)
self.bigboiSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/raw/debug", self.bigboiCallback, 20)
- self.debugAStarSubscriber = self.create_subscription(CompressedImage, "/autonav/debug/astar/image", self.debugAStarCallback, 20)
+ self.debugAStarSubscriber = self.create_subscription(CompressedImage, "/autonav/cfg_space/expanded/image", self.debugAStarCallback, 20)

self.get_logger().info("Starting event loop")

60 changes: 58 additions & 2 deletions autonav_ws/src/autonav_vision/src/combination.py
@@ -17,7 +17,7 @@

g_bridge = CvBridge()
g_mapData = MapMetaData()
- g_mapData.width = 200
+ g_mapData.width = 100
g_mapData.height = 100
g_mapData.resolution = 0.1
g_mapData.origin = Pose()
@@ -56,6 +56,60 @@ def image_received_right(self, msg):
self.image_right = msg
self.try_combine_images()

def order_points(self, pts):
# initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect


def four_point_transform(self, image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = self.order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype="float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped

def try_combine_images(self):
if self.image_left is None or self.image_right is None:
return
@@ -65,7 +119,9 @@ def try_combine_images(self):
cv2img_left = g_bridge.compressed_imgmsg_to_cv2(self.image_left)
cv2img_right = g_bridge.compressed_imgmsg_to_cv2(self.image_right)
combined = cv2.hconcat([cv2img_left, cv2img_right])
- datamap = cv2.resize(combined, dsize=(self.config.map_res * 2, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
+ pts = [(147, 287), (851, 232), (955, 678), (0, 678)]
+ combined = self.four_point_transform(combined, np.array(pts))
+ datamap = cv2.resize(combined, dsize=(self.config.map_res, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
flat = list(datamap.flatten().astype(int))
msg = OccupancyGrid(info=g_mapData, data=flat)

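For reference, the corner-ordering and warp logic added above can be exercised standalone. A minimal sketch using the hardcoded corner points from try_combine_images (only cv2 and numpy needed; np.linalg.norm replaces the explicit sqrt-of-squares but computes the same distances):

import cv2
import numpy as np

def order_points(pts):
    # Order corners as top-left, top-right, bottom-right, bottom-left.
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]      # top-left: smallest x + y
    rect[2] = pts[np.argmax(s)]      # bottom-right: largest x + y
    diff = np.diff(pts, axis=1)      # y - x for each point
    rect[1] = pts[np.argmin(diff)]   # top-right: smallest y - x
    rect[3] = pts[np.argmax(diff)]   # bottom-left: largest y - x
    return rect

# Hardcoded corners used after hconcat-ing the left and right camera frames
pts = np.array([(147, 287), (851, 232), (955, 678), (0, 678)], dtype="float32")
rect = order_points(pts)
# rect -> [[147, 287], [851, 232], [955, 678], [0, 678]]  (tl, tr, br, bl)

(tl, tr, br, bl) = rect
maxWidth = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
maxHeight = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
dst = np.array([[0, 0],
                [maxWidth - 1, 0],
                [maxWidth - 1, maxHeight - 1],
                [0, maxHeight - 1]], dtype="float32")
M = cv2.getPerspectiveTransform(rect, dst)
# warped = cv2.warpPerspective(combined, M, (maxWidth, maxHeight))

Because the combined frame is now warped into a single top-down view before gridding, the occupancy output also changes shape: g_mapData.width drops from 200 to 100 and the resize target goes from (map_res * 2, map_res) to (map_res, map_res).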
10 changes: 5 additions & 5 deletions autonav_ws/src/autonav_vision/src/expandify.cpp
@@ -95,10 +95,10 @@ class ExpandifyNode : public SCR::Node
return;
}

- std::vector<int8_t> cfg_space = std::vector<int8_t>((config.map_res * 2) * config.map_res);
+ std::vector<int8_t> cfg_space = std::vector<int8_t>((config.map_res) * config.map_res);
std::fill(cfg_space.begin(), cfg_space.end(), 0);

- for (int x = 0; x < config.map_res * 2; x++)
+ for (int x = 0; x < config.map_res; x++)
{
for (int y = 1; y < config.map_res; y++)
{
@@ -107,7 +107,7 @@
for (Circle &circle : circles)
{
auto idx = (x + circle.x) + config.map_res * (y + circle.y);
- auto expr_x = (x + circle.x) < config.map_res * 2 && (x + circle.x) >= 0;
+ auto expr_x = (x + circle.x) < config.map_res && (x + circle.x) >= 0;
auto expr_y = (y + circle.y) < config.map_res && (y + circle.y) >= 0;
if (expr_x && expr_y)
{
@@ -151,8 +151,8 @@ class ExpandifyNode : public SCR::Node

nav_msgs::msg::MapMetaData map;

- float maxRange = 0.65;
- float noGoPercent = 0.70;
+ float maxRange = 0.10;
+ float noGoPercent = 0.38;
int noGoRange = 0;
std::vector<Circle> circles;
ExpandifyConfig config;
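The inflation settings shrink here (maxRange 0.65 -> 0.10, noGoPercent 0.70 -> 0.38) alongside the switch from a double-wide to a square grid. The Circle offsets themselves are constructed outside the hunks shown, so the following is only an assumption about how maxRange relates to the 0.1 m/cell grid; a rough Python sketch with a hypothetical helper:

RESOLUTION = 0.1  # meters per cell, matching g_mapData.resolution

def inflation_radius_cells(max_range_m, resolution=RESOLUTION):
    # Hypothetical helper: how many grid cells the obstacle dilation reaches
    # if maxRange is interpreted as a metric radius on the occupancy grid.
    return max(1, round(max_range_m / resolution))

print(inflation_radius_cells(0.65))  # old maxRange -> about 6 cells
print(inflation_radius_cells(0.10))  # new maxRange -> 1 cell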
24 changes: 6 additions & 18 deletions autonav_ws/src/autonav_vision/src/transformations.py
@@ -17,7 +17,7 @@
g_bridge = CvBridge()

g_mapData = MapMetaData()
- g_mapData.width = 200
+ g_mapData.width = 100
g_mapData.height = 100
g_mapData.resolution = 0.1
g_mapData.origin = Pose()
@@ -37,6 +37,8 @@ def __init__(self):
self.blur_iterations = 3
self.rod_offset = 130
self.map_res = 80
self.image_warp_tl = 0.26
self.image_warp_tr = 0.26


class ImageTransformer(Node):
@@ -71,19 +73,6 @@ def regionOfDisinterest(self, img, vertices):
masked_image = cv2.bitwise_and(img, mask)
return masked_image

- def flattenImage(self, img):
- top_left = (int)(img.shape[1] * 0.26), (int)(img.shape[0])
- top_right = (int)(img.shape[1] - img.shape[1] * 0.26), (int)(img.shape[0])
- bottom_left = 0, 0
- bottom_right = (int)(img.shape[1]), 0
-
- src_pts = np.float32([[top_left], [top_right], [bottom_left], [bottom_right]])
- dest_pts = np.float32([[0, 640], [480, 640], [0, 0], [480, 0]])
-
- matrix = cv2.getPerspectiveTransform(dest_pts, src_pts)
- output = cv2.warpPerspective(img, matrix, (480, 640))
- return output

def publishOccupancyMap(self, img):
datamap = cv2.resize(img, dsize=(self.config.map_res, self.config.map_res), interpolation=cv2.INTER_LINEAR) / 2
flat = list(datamap.flatten().astype(int))
Expand Down Expand Up @@ -111,27 +100,26 @@ def onImageReceived(self, image = CompressedImage):
if self.dir == "left":
region_of_disinterest_vertices = [
(width, height),
- (width, height / 2),
+ (width, height / 1.8),
(0, height)
]
else:
region_of_disinterest_vertices = [
(0, height),
- (0, height / 2),
+ (0, height / 1.8),
(width, height)
]

# Apply region of disinterest and flattening
mask = self.regionOfDisinterest(mask, np.array([region_of_disinterest_vertices], np.int32))
mask[mask < 250] = 0
- mask = self.flattenImage(mask)

# Actually generate the map
self.publishOccupancyMap(mask)

# Preview the image
preview_image = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
- cv2.polylines(preview_image, np.array([region_of_disinterest_vertices], np.int32), True, (0, 255, 0), 2)
+ # cv2.polylines(preview_image, np.array([region_of_disinterest_vertices], np.int32), True, (0, 255, 0), 2)
preview_msg = g_bridge.cv2_to_compressed_imgmsg(preview_image)
preview_msg.header = image.header
preview_msg.format = "jpeg"
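The new image_warp_tl and image_warp_tr fields default to 0.26, the same inset ratio that was hardcoded in the removed flattenImage, but nothing in this diff consumes them yet (the commit title says they were added to transform.py, while the perspective warp itself now lives in combination.py). Purely as a sketch of the presumed intent, a parameterized version of the old flatten step might look like this; the function name and defaults are assumptions:

import cv2
import numpy as np

def flatten_image(img, warp_tl=0.26, warp_tr=0.26):
    # Hypothetical parameterized version of the removed flattenImage:
    # warp_tl / warp_tr stand in for the hardcoded 0.26 insets on the
    # top-left and top-right source corners (self.config.image_warp_tl and
    # self.config.image_warp_tr would be passed here).
    top_left = int(img.shape[1] * warp_tl), int(img.shape[0])
    top_right = int(img.shape[1] - img.shape[1] * warp_tr), int(img.shape[0])
    bottom_left = 0, 0
    bottom_right = int(img.shape[1]), 0

    src_pts = np.float32([top_left, top_right, bottom_left, bottom_right])
    dest_pts = np.float32([[0, 640], [480, 640], [0, 0], [480, 0]])

    # Same direction as the old code: the transform maps the destination
    # rectangle back onto the source trapezoid.
    matrix = cv2.getPerspectiveTransform(dest_pts, src_pts)
    return cv2.warpPerspective(img, matrix, (480, 640))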
4 changes: 3 additions & 1 deletion display/scripts/globals.js
@@ -43,7 +43,9 @@ var addressKeys = {
"blur_weight": "int",
"blur_iterations": "int",
"rod_offset": "int",
"map_res": "int"
"map_res": "int",
"image_warp_tl": "float",
"image_warp_tr": "float"
},

"autonav_vision_expandifier": {
4 changes: 2 additions & 2 deletions display/scripts/main.js
@@ -602,7 +602,7 @@ $(document).ready(function () {
input.classList.add("form-control");
input.value = data;
input.onchange = function () {
- config[device][text] = input.value;
+ config[device][text] = parseFloat(input.value);
send({
op: "configuration",
device: device,
@@ -628,7 +628,7 @@
input.classList.add("form-control");
input.value = data;
input.onchange = function () {
- config[device][text] = input.value;
+ config[device][text] = parseInt(input.value);
send({
op: "configuration",
device: device,
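Worth noting: input.value is always a string in the browser, so without the parseFloat / parseInt coercion the numeric config fields (including the new image_warp_tl and image_warp_tr, registered as "float" in globals.js above) would be sent back as strings in the configuration message.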
