Skip to content

Commit

Permalink
Move the template into a new directory as part of the share/ installation target; look for assets using the share path, not the absolute path of the file. Add start_camera and stop_camera services to start & stop the camera pipeline.
Browse files Browse the repository at this point in the history
  • Loading branch information
civerachb-cpr committed Sep 27, 2024
1 parent 78cd441 commit 89bf73f
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 9 deletions.
1 change: 1 addition & 0 deletions turtlebot4_vision_tutorials/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
(os.path.join('share', package_name, 'models'), glob('models/*.blob')),
(os.path.join('share', package_name, 'launch'), glob('launch/*.launch.py')),
(os.path.join('share', package_name, 'config'), glob('config/*.yaml')),
(os.path.join('share', package_name, 'templates'), glob('templates/*.py')),
],
install_requires=['setuptools'],
zip_safe=True,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,14 +90,14 @@ def pd_postprocess(inference, crop_region):
xnorm.append(xn)
ynorm.append(yn)
scores.append(inference[3*i+2])
x.append(int(xmin + xn * size))
y.append(int(ymin + yn * size))
x.append(int(xmin + xn * size))
y.append(int(ymin + yn * size))

next_crop_region = determine_crop_region(scores, x, y) if ${_smart_crop} else init_crop_region
return x, y, xnorm, ynorm, scores, next_crop_region

node.warn("Processing node started")
# Defines the default crop region (pads the full image from both sides to make it a square image)
# Defines the default crop region (pads the full image from both sides to make it a square image)
# Used when the algorithm cannot reliably determine the crop region from the previous frame.
init_crop_region = ${_init_crop_region}
crop_region = init_crop_region
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,13 +10,16 @@

from turtlebot4_vision_tutorials.FPS import FPS

SCRIPT_DIR = Path(__file__).resolve().parent
MOVENET_LIGHTNING_MODEL = os.path.join(SCRIPT_DIR.parent,
from ament_index_python.packages import get_package_share_directory

SHARE_DIR = get_package_share_directory('turtlebot4_vision_tutorials')
MOVENET_LIGHTNING_MODEL = os.path.join(SHARE_DIR,
"models",
"movenet_singlepose_lightning_U8_transpose.blob")
MOVENET_THUNDER_MODEL = os.path.join(SCRIPT_DIR.parent,
MOVENET_THUNDER_MODEL = os.path.join(SHARE_DIR,
"models",
"movenet_singlepose_thunder_U8_transpose.blob")
TEMPLATE_DIR = os.path.join(SHARE_DIR, 'templates')

# Dictionary that maps from joint names to keypoint indices.
KEYPOINT_DICT = {
Expand Down Expand Up @@ -308,7 +311,7 @@ def build_processing_script(self):
which is a python template
'''
# Read the template
with open(os.path.join(SCRIPT_DIR, 'template_processing_script.py'), 'r') as file:
with open(os.path.join(TEMPLATE_DIR, 'template_processing_script.py'), 'r') as file:
template = Template(file.read())

# Perform the substitution
Expand Down Expand Up @@ -346,7 +349,6 @@ def pd_postprocess(self, inference):
return body

def next_frame(self):

self.fps.update()

# Get the device camera frame if wanted
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import rclpy
from rclpy.node import Node
from std_msgs.msg import String as string_msg
from std_srvs.srv import Trigger
from sensor_msgs.msg import Image

from irobot_create_msgs.msg import LightringLeds
Expand Down Expand Up @@ -113,6 +114,20 @@ def __init__(self):
10,
)

self._is_paused = False

self.start_camera_srv = self.create_service(
Trigger,
'start_camera',
self.handle_start_camera
)

self.stop_camera_srv = self.create_service(
Trigger,
'stop_camera',
self.handle_stop_camera
)

timer_period = 0.0833 # seconds
self.timer = self.create_timer(timer_period, self.pose_detect)

Expand All @@ -133,6 +148,9 @@ def __init__(self):
# self.button_1_function()

def pose_detect(self):
if self._is_paused:
return

if not (POSE_DETECT_ENABLE):
return
# Run movenet on next frame
Expand Down Expand Up @@ -278,6 +296,30 @@ def autonomous_lights(self):
# Publish the message
self.lightring_publisher.publish(lightring_msg)

def handle_start_camera(self, req, resp):
    """Trigger service callback that restarts the camera pipeline.

    If the pipeline was previously stopped via ``stop_camera``, a new
    MovenetDepthai pipeline is created and detection resumes; otherwise
    the request is rejected with an explanatory message.

    :param req: std_srvs/Trigger request (unused; Trigger carries no data)
    :param resp: std_srvs/Trigger response to populate
    :return: ``resp`` with ``success``/``message`` filled in
    """
    if self._is_paused:
        self._is_paused = False
        # Re-create the pipeline with the same settings used at startup.
        # NOTE: the original wrote crop / smart_crop as `not 'store_true'`,
        # which always evaluates to False (a non-empty string is truthy) —
        # use the explicit constant instead.
        self.pose = MovenetDepthai(input_src='rgb',
                                   model='thunder',
                                   score_thresh=0.3,
                                   crop=False,
                                   smart_crop=False,
                                   internal_frame_height=432)
        resp.success = True
    else:
        # Explicitly report failure instead of relying on the field default.
        resp.success = False
        resp.message = 'Device already running'
    return resp

def handle_stop_camera(self, req, resp):
    """Trigger service callback that pauses the camera pipeline.

    Closes the DepthAI device and discards the pipeline object when it is
    running; if detection is already paused, the request is rejected with
    an explanatory message.

    :param req: std_srvs/Trigger request (unused; Trigger carries no data)
    :param resp: std_srvs/Trigger response to populate
    :return: ``resp`` with ``success``/``message`` filled in
    """
    # Guard clause: nothing to do if the pipeline is already stopped.
    if self._is_paused:
        # success is left at its default (False) to signal rejection.
        resp.message = 'Device already stopped'
        return resp

    self._is_paused = True
    self.pose.device.close()
    self.pose = None
    resp.success = True
    return resp


def main(args=None):
rclpy.init(args=args)
Expand Down

0 comments on commit 89bf73f

Please sign in to comment.