From 89bf73f2108dd9ee2ea35b1a00c4b7c213ecb0ae Mon Sep 17 00:00:00 2001
From: Chris Iverach-Brereton
Date: Fri, 27 Sep 2024 09:00:26 -0400
Subject: [PATCH] Move the template into a new directory installed under the
 share/ target, look up assets via the package share path instead of the
 file's absolute path, and add start_camera/stop_camera services to start
 and stop the camera pipeline

---
 turtlebot4_vision_tutorials/setup.py |  1 +
 .../template_processing_script.py    |  8 ++--
 .../MovenetDepthaiEdge.py            | 12 +++---
 .../pose_detection.py                | 42 +++++++++++++++++++
 4 files changed, 54 insertions(+), 9 deletions(-)
 rename turtlebot4_vision_tutorials/{turtlebot4_vision_tutorials => templates}/template_processing_script.py (97%)

diff --git a/turtlebot4_vision_tutorials/setup.py b/turtlebot4_vision_tutorials/setup.py
index 9956f9d..dac5423 100644
--- a/turtlebot4_vision_tutorials/setup.py
+++ b/turtlebot4_vision_tutorials/setup.py
@@ -16,6 +16,7 @@
         (os.path.join('share', package_name, 'models'), glob('models/*.blob')),
         (os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),
         (os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
+        (os.path.join('share', package_name, 'templates'), glob('templates/*.py')),
     ],
     install_requires=['setuptools'],
     zip_safe=True,
diff --git a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/template_processing_script.py b/turtlebot4_vision_tutorials/templates/template_processing_script.py
similarity index 97%
rename from turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/template_processing_script.py
rename to turtlebot4_vision_tutorials/templates/template_processing_script.py
index 82a03f1..47d4d34 100644
--- a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/template_processing_script.py
+++ b/turtlebot4_vision_tutorials/templates/template_processing_script.py
@@ -90,14 +90,14 @@ def pd_postprocess(inference, crop_region):
             xnorm.append(xn)
             ynorm.append(yn)
             scores.append(inference[3*i+2])
-            x.append(int(xmin + xn * size)) 
-            y.append(int(ymin + yn * size)) 
- 
+            x.append(int(xmin + xn * size))
+            y.append(int(ymin + yn * size))
+
     next_crop_region = determine_crop_region(scores, x, y) if ${_smart_crop} else init_crop_region
     return x, y, xnorm, ynorm, scores, next_crop_region
 
 node.warn("Processing node started")
-# Defines the default crop region (pads the full image from both sides to make it a square image) 
+# Defines the default crop region (pads the full image from both sides to make it a square image)
 # Used when the algorithm cannot reliably determine the crop region from the previous frame.
 init_crop_region = ${_init_crop_region}
 crop_region = init_crop_region
diff --git a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/MovenetDepthaiEdge.py b/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/MovenetDepthaiEdge.py
index 382862d..18ef0e9 100644
--- a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/MovenetDepthaiEdge.py
+++ b/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/MovenetDepthaiEdge.py
@@ -10,13 +10,16 @@
 from turtlebot4_vision_tutorials.FPS import FPS
 
-SCRIPT_DIR = Path(__file__).resolve().parent
-MOVENET_LIGHTNING_MODEL = os.path.join(SCRIPT_DIR.parent,
+from ament_index_python.packages import get_package_share_directory
+
+SHARE_DIR = get_package_share_directory('turtlebot4_vision_tutorials')
+MOVENET_LIGHTNING_MODEL = os.path.join(SHARE_DIR,
                                        "models",
                                        "movenet_singlepose_lightning_U8_transpose.blob")
-MOVENET_THUNDER_MODEL = os.path.join(SCRIPT_DIR.parent,
+MOVENET_THUNDER_MODEL = os.path.join(SHARE_DIR,
                                      "models",
                                      "movenet_singlepose_thunder_U8_transpose.blob")
+TEMPLATE_DIR = os.path.join(SHARE_DIR, 'templates')
 
 # Dictionary that maps from joint names to keypoint indices.
 KEYPOINT_DICT = {
@@ -308,7 +311,7 @@ def build_processing_script(self):
         which is a python template
         '''
         # Read the template
-        with open(os.path.join(SCRIPT_DIR, 'template_processing_script.py'), 'r') as file:
+        with open(os.path.join(TEMPLATE_DIR, 'template_processing_script.py'), 'r') as file:
             template = Template(file.read())
 
         # Perform the substitution
@@ -346,7 +349,6 @@ def pd_postprocess(self, inference):
         return body
 
     def next_frame(self):
-
         self.fps.update()
 
         # Get the device camera frame if wanted
diff --git a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/pose_detection.py b/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/pose_detection.py
index ccf7b29..008ecb1 100644
--- a/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/pose_detection.py
+++ b/turtlebot4_vision_tutorials/turtlebot4_vision_tutorials/pose_detection.py
@@ -24,6 +24,7 @@
 import rclpy
 from rclpy.node import Node
 from std_msgs.msg import String as string_msg
+from std_srvs.srv import Trigger
 from sensor_msgs.msg import Image
 
 from irobot_create_msgs.msg import LightringLeds
@@ -113,6 +114,20 @@ def __init__(self):
             10,
         )
 
+        self._is_paused = False
+
+        self.start_camera_srv = self.create_service(
+            Trigger,
+            'start_camera',
+            self.handle_start_camera
+        )
+
+        self.stop_camera_srv = self.create_service(
+            Trigger,
+            'stop_camera',
+            self.handle_stop_camera
+        )
+
         timer_period = 0.0833  # seconds
         self.timer = self.create_timer(timer_period, self.pose_detect)
 
@@ -133,6 +148,9 @@ def __init__(self):
         # self.button_1_function()
 
     def pose_detect(self):
+        if self._is_paused:
+            return
+
         if not (POSE_DETECT_ENABLE):
             return
         # Run movenet on next frame
@@ -278,6 +296,30 @@ def autonomous_lights(self):
         # Publish the message
         self.lightring_publisher.publish(lightring_msg)
 
+    def handle_start_camera(self, req, resp):
+        if self._is_paused:
+            self._is_paused = False
+            self.pose = MovenetDepthai(input_src='rgb',
+                                       model='thunder',
+                                       score_thresh=0.3,
+                                       crop=False,
+                                       smart_crop=False,
+                                       internal_frame_height=432)
+            resp.success = True
+        else:
+            resp.message = 'Device already running'
+        return resp
+
+    def handle_stop_camera(self, req, resp):
+        if not self._is_paused:
+            self._is_paused = True
+            self.pose.device.close()
+            self.pose = None
+            resp.success = True
+        else:
+            resp.message = 'Device already stopped'
+        return resp
+
 
 def main(args=None):
     rclpy.init(args=args)
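
Note (not part of the patch): a minimal sketch of how the new services could be exercised from another ROS 2 process, assuming the pose-detection node is running and the services live in the default namespace. The client node name 'camera_toggle_client' is arbitrary. std_srvs/Trigger takes an empty request, and its response's success field defaults to False, which is why the handlers above only set it on the success path.

    #!/usr/bin/env python3
    # Hypothetical client for the new start_camera/stop_camera services.
    # Assumes the default namespace; the node name below is arbitrary.
    import rclpy
    from std_srvs.srv import Trigger

    rclpy.init()
    node = rclpy.create_node('camera_toggle_client')
    client = node.create_client(Trigger, 'stop_camera')  # or 'start_camera'
    if client.wait_for_service(timeout_sec=5.0):
        future = client.call_async(Trigger.Request())
        rclpy.spin_until_future_complete(node, future)
        result = future.result()
        node.get_logger().info(f'success={result.success} message="{result.message}"')
    node.destroy_node()
    rclpy.shutdown()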