diff --git a/.github/workflows/reusable-sphinx-build.yml b/.github/workflows/reusable-sphinx-build.yml index d9de9b353..f787beef2 100644 --- a/.github/workflows/reusable-sphinx-build.yml +++ b/.github/workflows/reusable-sphinx-build.yml @@ -3,7 +3,7 @@ name: Build Sphinx documentation 📖 on: push: branches: - - dev + - MS2 # ---------------------------------------------------------------------------------------------------------------------- @@ -40,4 +40,4 @@ jobs: working-directory: ./doc run: | sudo apt install pandoc - make html \ No newline at end of file + make html diff --git a/demos/hsrb-real-test-demos/gripper-real.py b/demos/hsrb-real-test-demos/gripper-real.py deleted file mode 100644 index 33a95b703..000000000 --- a/demos/hsrb-real-test-demos/gripper-real.py +++ /dev/null @@ -1,24 +0,0 @@ -from tmc_control_msgs.msg import GripperApplyEffortActionGoal - -from pycram.designators.action_designator import * - -def open_gripper(): - """ Opens the gripper of the HSR """ - pub_gripper = rospy.Publisher('/hsrb/gripper_controller/grasp/goal', GripperApplyEffortActionGoal, - queue_size=10) - rate = rospy.Rate(10) - rospy.sleep(2) - msg = GripperApplyEffortActionGoal() # sprechen joint gripper_controll_manager an, indem wir goal publishen type den giskard fürs greifen erwartet - msg.goal.effort = 0.8 - pub_gripper.publish(msg) - - -def close_gripper(): - """ Closes the gripper of the HSR """ - pub_gripper = rospy.Publisher('/hsrb/gripper_controller/grasp/goal', GripperApplyEffortActionGoal, - queue_size=10) - rate = rospy.Rate(10) - rospy.sleep(2) - msg = GripperApplyEffortActionGoal() - msg.goal.effort = -0.8 - pub_gripper.publish(msg) \ No newline at end of file diff --git a/demos/hsrb-real-test-demos/rigid-body-test.py b/demos/hsrb-real-test-demos/rigid-body-test.py deleted file mode 100644 index 43c9604bb..000000000 --- a/demos/hsrb-real-test-demos/rigid-body-test.py +++ /dev/null @@ -1,49 +0,0 @@ -import numpy as np -import rospy -from tmc_control_msgs.msg import GripperApplyEffortActionGoal - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy - -from geometry_msgs.msg import Point -# Initialize the Bullet world for simulation -world = BulletWorld() - -# Visualization Marker Publisher for ROS -v = VizMarkerPublisher() - -import pybullet as p -#robot = Object("hsrb", ObjectType.ROBOT, "../../resources/hsrb.urdf", 
pose=Pose([1, 2, 0])) -#robot.set_color([0.5, 0.5, 0.9, 1]) - -id = BulletWorld.current_bullet_world.add_rigid_box(Pose(), (0.5,0.5,0,5), [1,0,0,1]) -box_object = Object("box" + "_" + str(rospy.get_time()), "box", pose=Pose(), color=[1,0,0,1], id=id, - customGeom={"size": [0.5, 0.5, 0.5]}) -#box_object.set_pose(Pose()) -#box_desig = ObjectDesignatorDescription.Object(box_object.name, box_object.type, box_object) - -#milk = Object("milk", "milk", "milk.stl", pose=Pose([4.8, 2.6, 0.87]),color=[0, 1, 0, 1]) -#print (milk) -#print(box_object) \ No newline at end of file diff --git a/demos/hsrb_test_demos/pick-up-semi-real.py b/demos/hsrb_test_demos/pick-up-semi-real.py deleted file mode 100644 index e78303615..000000000 --- a/demos/hsrb_test_demos/pick-up-semi-real.py +++ /dev/null @@ -1,88 +0,0 @@ -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -from geometry_msgs.msg import Point -# Initialize the Bullet world for simulation -world = BulletWorld("DIRECT") - -# Visualization Marker Publisher for ROS -v = VizMarkerPublisher() - -# Initialize Giskard interface for motion planning -giskardpy.init_giskard_interface() - -# Create and configure the robot object -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) -robot.set_color([0.5, 0.5, 0.9, 1]) - -# Create environmental objects -#apartment = Object("kitchen", ObjectType.ENVIRONMENT, "test-room.urdf") - -# Define orientation for objects -object_orientation = axis_angle_to_quaternion([0, 0, 1], 0) - -# Define a breakfast cereal object -breakfast_cereal = Object("breakfast_cereal", "breakfast_cereal", "breakfast_cereal.stl", pose=Pose([4.8, 2.6, 0.87]),color=[0, 1, 0, 1]) -# fork = Object("Fork", "fork", "spoon.stl", pose=Pose([-2.8, 2.3, 0.368], object_orientation), color=[1, 0, 0, 1]) -# spoon = Object("Spoon", "spoon", "spoon.stl", pose=Pose([-2.5, 2.3, 0.368], object_orientation), color=[0, 1, 0, 1]) -# metalmug = Object("Metalmug", "metalmug", "bowl.stl", pose=Pose([-3.1, 2.3, 0.39]), color=[0, 1, 0, 1]) -# plate = Object("Plate", "plate", "board.stl", pose=Pose([-2.2, 2.3, 0.368], object_orientation), color=[0, 1, 0, 1]) -#bowl = Object("bowl", "bowl", "bowl.stl", pose=Pose([4.8, 2.6, 0.87]), color=[0, 1, 0, 1]) - -# Set the world's gravity -#world.set_gravity([0.0, 0.0, 9.81]) - -# Update robot state -RobotStateUpdater("/tf", "/giskard_joint_states") - - -# Define a function for object detection and interaction -def look_and_detect(obj): - """ - Directs the robot to look at an object and perform detection. - - Args: - obj (Object): The object to detect. - - Returns: - ObjectDesignator: The detected object designator. 
- """ - LookAtAction(targets=[obj.pose]).resolve().perform() - object_desig = DetectAction(BelieveObject(types=[obj.type]), technique='default').resolve().perform() - return object_desig - - -# Main interaction sequence with semi-real robot -with semi_real_robot: - rospy.loginfo("Starting demo") - # Initial setup actions - ParkArmsAction([Arms.LEFT]).resolve().perform() - MoveTorsoAction([0.25]).resolve().perform() - - # Navigate to cereal location - NavigateAction(target_locations=[Pose([4.1, 2.2, 0], object_orientation)]).resolve().perform() - - # Detect and interact with the cereal - dict_desig = look_and_detect(obj=breakfast_cereal) - giskardpy.avoid_all_collisions() - PickUpAction((dict_desig["breakfast_cereal"]), ["left"], ["front"]).resolve().perform() - - # Place the cereal at the target location - cereal_target_pose = Pose([4.1, 2.2, 0], object_orientation) - NavigateAction(target_locations=[cereal_target_pose]).resolve().perform() - PlaceAction(dict_desig["breakfast_cereal"], ["left"], ["front"], [Pose([4.8, 2.6, 0.97])]).resolve().perform() - - # Finalize actions - ParkArmsAction([Arms.LEFT]).resolve().perform() - rospy.loginfo("Done!") - diff --git a/demos/hsrb-real-test-demos/pick-up-real.py b/demos/pycram-hsrb-real-test-demos/pick-up-real.py similarity index 100% rename from demos/hsrb-real-test-demos/pick-up-real.py rename to demos/pycram-hsrb-real-test-demos/pick-up-real.py diff --git a/demos/hsrb-real-test-demos/__init__.py b/demos/pycram_clean_the_table_demo/__init__.py similarity index 100% rename from demos/hsrb-real-test-demos/__init__.py rename to demos/pycram_clean_the_table_demo/__init__.py diff --git a/demos/pycram_clean_the_table_demo/clean_table_ms1_demo.py b/demos/pycram_clean_the_table_demo/deprecated/clean_table_ms1_demo.py similarity index 100% rename from demos/pycram_clean_the_table_demo/clean_table_ms1_demo.py rename to demos/pycram_clean_the_table_demo/deprecated/clean_table_ms1_demo.py diff --git a/demos/pycram_clean_the_table_demo/clean_table_real_hsr_ms1_demo.py b/demos/pycram_clean_the_table_demo/deprecated/clean_table_real_hsr_ms1_demo.py similarity index 100% rename from demos/pycram_clean_the_table_demo/clean_table_real_hsr_ms1_demo.py rename to demos/pycram_clean_the_table_demo/deprecated/clean_table_real_hsr_ms1_demo.py diff --git a/demos/pycram_receptionist/M2_Demo.py b/demos/pycram_receptionist_demo/M2_Demo.py similarity index 89% rename from demos/pycram_receptionist/M2_Demo.py rename to demos/pycram_receptionist_demo/M2_Demo.py index b9690fef6..218475f84 100644 --- a/demos/pycram_receptionist/M2_Demo.py +++ b/demos/pycram_receptionist_demo/M2_Demo.py @@ -1,20 +1,15 @@ import rospy -from geometry_msgs.msg import PoseStamped -from robokudo_msgs.msg import QueryActionGoal -from pycram.designators.action_designator import DetectAction, LookAtAction, NavigateAction -from pycram.designators.motion_designator import TalkingMotion, MoveMotion -from pycram.external_interfaces import robokudo -from pycram.process_module import simulated_robot, with_simulated_robot, real_robot, with_real_robot, semi_real_robot +from pycram.designators.action_designator import DetectAction, NavigateAction +from pycram.designators.motion_designator import TalkingMotion +from pycram.process_module import real_robot import pycram.external_interfaces.giskard as giskardpy -from pycram.ros.robot_state_updater import RobotStateUpdater from pycram.ros.viz_marker_publisher import VizMarkerPublisher from pycram.designators.location_designator import * from 
pycram.designators.object_designator import * -from pycram.enums import ObjectType from pycram.bullet_world import BulletWorld, Object from pycram.external_interfaces.knowrob import instances_of, get_guest_info from std_msgs.msg import String, Bool -import talk_actions +from demos.pycram_receptionist_demo.deprecated import talk_actions world = BulletWorld("DIRECT") # /pycram/viz_marker topic bei Marker Array diff --git a/demos/pycram_receptionist_demo/MS2_example-v.py b/demos/pycram_receptionist_demo/MS2_example-v.py new file mode 100644 index 000000000..2e90e176d --- /dev/null +++ b/demos/pycram_receptionist_demo/MS2_example-v.py @@ -0,0 +1,131 @@ +import rospy + +from pycram.designators.action_designator import DetectAction, NavigateAction +from pycram.designators.motion_designator import TalkingMotion +from pycram.fluent import Fluent +from pycram.helper import axis_angle_to_quaternion +from pycram.process_module import real_robot +import pycram.external_interfaces.giskard as giskardpy +from pycram.ros.robot_state_updater import RobotStateUpdater +from pycram.ros.viz_marker_publisher import VizMarkerPublisher +from pycram.designators.location_designator import * +from pycram.designators.object_designator import * +from pycram.bullet_world import BulletWorld, Object +from std_msgs.msg import String +from demos.pycram_receptionist_demo.deprecated import talk_actions +import pycram.external_interfaces.navigate as moveBase + +world = BulletWorld("DIRECT") +# /pycram/viz_marker topic bei Marker Array +v = VizMarkerPublisher() + +robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") +robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() +robot.set_color([0.5, 0.5, 0.9, 1]) + +# carefull that u spawn the correct kitchen +kitchen = Object("kitchen", "environment", "kitchen.urdf") +giskardpy.init_giskard_interface() +RobotStateUpdater("/tf", "/giskard_joint_states") + +robot_orientation_couch = axis_angle_to_quaternion([0, 0, 1], 0) +pose_couch = Pose([3, 5, 0], robot_orientation_couch) + +robot_orientation_from_couch = axis_angle_to_quaternion([0, 0, 1], -90) +pose_from_couch = Pose([4.2, 3.8, 0], robot_orientation_from_couch) + +robot_orientation = axis_angle_to_quaternion([0, 0, 1], 90) +pose_kitchen_to_couch = Pose([4.2, 3, 0], robot_orientation) + +pose_home = Pose([3, 1.7, 0], robot_orientation) + +pub_nlp = rospy.Publisher('/startListener', String, queue_size=10) + + +class HumanDescription: + + def __init__(self, name, fav_drink): + # TODO: coordinate with Perception on what is easy to implement + # characteristics to consider: height, hair color, and age. 
+ self.human_pose = Fluent() + self.name = name + self.fav_drink = fav_drink # self.shirt_color = shirt_color # self.gender = gender + + self.human_pose_sub = rospy.Subscriber("/human_pose", String, self.human_pose_cb) + + def human_pose_cb(self, HumanPoseMsg): + self.human_pose.set_value(HumanPoseMsg.data) + + + +def demo_test(area): + with real_robot: + # Guest arrives: + TalkingMotion("Hello, i am Toya let me open my eyes for you").resolve().perform() + # Perception, detect first guest -> First detect guest, then listen + DetectAction(technique='default', state='start').resolve().perform() + + host = HumanDescription("Bob", fav_drink="Coffee") + + # While loop, human is detected + while host.human_pose: + TalkingMotion("Please step in front of me").resolve.perform() + rospy.sleep(5) + + rospy.loginfo("human detected") + + # look at guest and introduction + giskardpy.move_head_to_human() + TalkingMotion("Hello, i am Toya and my favorite drink is oil. What about you, talk to me?").resolve().perform() + + # reicht sleep 1? + rospy.sleep(1) + + # signal to start listening + pub_nlp.publish("start listening") + # TODO: How to erst weiter machen, wenn Knowledge Daten geschickt hat + + TalkingMotion("Hey i will stop looking now").resolve().perform() + + rospy.loginfo("stop looking now") + giskardpy.stop_looking() + rospy.loginfo("Navigating now") + TalkingMotion("navigating to couch area now, pls step away").resolve().perform() + + if area == 'to_couch': + NavigateAction([pose_kitchen_to_couch]).resolve().perform() + NavigateAction([pose_couch]).resolve().perform() + elif area == 'from_couch': + NavigateAction([pose_from_couch]).resolve().perform() + NavigateAction([pose_home]).resolve().perform() + + # failure handling + # rospy.Subscriber("nlp_feedback", Bool, talk_error) + + # receives name and drink via topic + # rospy.Subscriber("nlp_out", String, talk_request) + + +def nav_test(): + with real_robot: + robot_orientation = axis_angle_to_quaternion([0, 0, 1], 90) + test_pose1 = Pose([4.2, 3, 0], robot_orientation) + test_pose = Pose([3, 5, 0], [0, 0, 0, 1]) + moveBase.queryPoseNav(test_pose1) + moveBase.queryPoseNav(test_pose) + +# demo_test('from_couch') +# demo_test('to_couch') + +# receives name and drink via topic +# rospy.Subscriber("nlp_out", String, talk_request) + + +# 1. rasa run --enable-api -> start Rasa Server +# 2. python3 activate_language_processing.py -> NLP +# 3. roslaunch suturo_bringup suturo_bringup.launch -> Map +# 4. roslaunch_hsr_velocity_controller unloas_my_controller.launch +# 5. roslaunch giskardpy giskardpy_hsr_real_vel.launch -> Giskard +# starten +# 6. rosrun robokudo main.py _ae=humandetection_demo_ros_pkg=milestone1 -> Perception +# 7. 
run demo in Pycharm -> Planning diff --git a/demos/pycram_receptionist/Ms2_testable_stuff.py b/demos/pycram_receptionist_demo/Ms2_testable_stuff.py similarity index 81% rename from demos/pycram_receptionist/Ms2_testable_stuff.py rename to demos/pycram_receptionist_demo/Ms2_testable_stuff.py index 1d599abfa..83221fc56 100644 --- a/demos/pycram_receptionist/Ms2_testable_stuff.py +++ b/demos/pycram_receptionist_demo/Ms2_testable_stuff.py @@ -1,21 +1,15 @@ import rospy -from geometry_msgs.msg import PoseStamped -from robokudo_msgs.msg import QueryActionGoal -from pycram.designators.action_designator import DetectAction, LookAtAction, NavigateAction -from pycram.designators.motion_designator import TalkingMotion, MoveMotion -from pycram.external_interfaces import robokudo +from pycram.designators.action_designator import DetectAction, NavigateAction +from pycram.designators.motion_designator import TalkingMotion from pycram.helper import axis_angle_to_quaternion -from pycram.process_module import simulated_robot, with_simulated_robot, real_robot, with_real_robot, semi_real_robot +from pycram.process_module import real_robot import pycram.external_interfaces.giskard as giskardpy -from pycram.ros.robot_state_updater import RobotStateUpdater from pycram.ros.viz_marker_publisher import VizMarkerPublisher from pycram.designators.location_designator import * from pycram.designators.object_designator import * -from pycram.enums import ObjectType from pycram.bullet_world import BulletWorld, Object -from pycram.external_interfaces.knowrob import instances_of, get_guest_info -from std_msgs.msg import String, Bool -import talk_actions +from std_msgs.msg import String +from demos.pycram_receptionist_demo.deprecated import talk_actions import pycram.external_interfaces.navigate as moveBase world = BulletWorld("DIRECT") # /pycram/viz_marker topic bei Marker Array @@ -98,12 +92,16 @@ def demo_test(area): pose_home = Pose([3, 1.7, 0], robot_orientation) + DetectAction(technique='default', state='start').resolve().perform() # Perception, detect first guest - perceived_object_dict = DetectAction(BelieveObject(types=[milk.type]), technique='human').resolve().perform() - #while perceived_object_dict[0] is None: - # rospy.sleep(5) - # TalkingMotion("Please step in front of me") - # rospy.sleep(5) + #todo richtigen fluent erstellen + fluentvariable = True + while not fluentvariable: + + #TODO: funktioniert Designator in While Bedingung??? + TalkingMotion("Please step in front of me").resolve.perform() + rospy.sleep(5) + rospy.loginfo("human detected") @@ -112,18 +110,14 @@ def demo_test(area): TalkingMotion("Hello, i am Toya and my favorite drink is oil. What about you, talk to me?").resolve().perform() # reicht sleep 1? 
- rospy.sleep(10) + rospy.sleep(1) # signal to start listening - #pub_nlp.publish("start listening") - + pub_nlp.publish("start listening") + #TODO: How to erst weiter machen, wenn Knowledge Daten geschickt hat TalkingMotion("Hey i will stop looking now").resolve().perform() - # TalkingMotion("Hello, i will stop looking at you now").resolve().perform() - # rospy.sleep(2) - # rospy.loginfo("stop looking now") - # giskardpy.stop_looking() rospy.loginfo("stop looking now") giskardpy.stop_looking() @@ -158,7 +152,7 @@ def nav_test(): -demo_test('from_couch') +#demo_test('from_couch') #demo_test('to_couch') # receives name and drink via topic diff --git a/demos/pycram_receptionist_demo/School_Demo.py b/demos/pycram_receptionist_demo/School_Demo.py new file mode 100644 index 000000000..6013ce5ac --- /dev/null +++ b/demos/pycram_receptionist_demo/School_Demo.py @@ -0,0 +1,103 @@ +import rospy +from pycram.designators.action_designator import DetectAction, NavigateAction +from pycram.designators.motion_designator import TalkingMotion +from pycram.process_module import real_robot +import pycram.external_interfaces.giskard as giskardpy +from pycram.ros.viz_marker_publisher import VizMarkerPublisher +from pycram.designators.location_designator import * +from pycram.designators.object_designator import * +from pycram.bullet_world import BulletWorld, Object +from std_msgs.msg import String, Bool +from demos.pycram_receptionist_demo.deprecated import talk_actions +from pycram.helper import axis_angle_to_quaternion + +world = BulletWorld("DIRECT") +# /pycram/viz_marker topic bei Marker Array +v = VizMarkerPublisher() + +world.set_gravity([0, 0, -9.8]) +robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") +robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() +robot.set_color([0.5, 0.5, 0.9, 1]) +kitchen = Object("kitchen", "environment", "kitchen.urdf") +robot.set_joint_state(robot_description.torso_joint, 0.24) +kitchen_desig = ObjectDesignatorDescription(names=["kitchen"]) +milk = Object("Milkpack", "milk", "milk.stl", pose=Pose([-2.7, 2.3, 0.43]), color=[1, 0, 0, 1]) + +giskardpy.init_giskard_interface() +# giskardpy.sync_worlds() +# RobotStateUpdater("/tf", "/joint_states") + + +class HumanDescription: + + def __init__(self, name, fav_drink, shirt_color, gender): + self.name = name + self.fav_drink = fav_drink + self.shirt_color = shirt_color + self.gender = gender + # TODO: coordinate with Perception on what is easy to implement + # characteristics to consider: height, hair color, and age. 
+ + +def talk_request(data: String): + """ + callback function that takes the data from nlp (name and drink) and lets the robot talk + :param data: String "name drink" + """ + + name_drink = data.data.split(" ") + talk_actions.name_drink_talker(name_drink) + rospy.loginfo("nlp data:" + name_drink[0] + " " + name_drink[1]) + + +def talk_error(data): + """ + callback function if no name/drink was heard + """ + + error_msgs = "i could not hear you, please repeat" + TalkingMotion(error_msgs).resolve().perform() + pub_nlp.publish("start listening") + + +with real_robot: + # Variables + robot_orientation = axis_angle_to_quaternion([0, 0, 1], 90) + pose_kitchen_to_couch = Pose([4.2, 3, 0], robot_orientation) + robot_orientation_couch = axis_angle_to_quaternion([0, 0, 1], 0) + pose_couch = Pose([3, 5, 0], robot_orientation_couch) + + # Perception + DetectAction(technique='default', state='start').resolve().perform() + rospy.sleep(2) + rospy.loginfo("human detected") + + # NLP + pub_nlp = rospy.Publisher('/startListener', String, queue_size=10) + TalkingMotion("Hello, i am Toya and my favorite drink is oil. What about you, talk to me?").resolve().perform() + rospy.sleep(2) + # signal to start listening + pub_nlp.publish("start listening") + + # Manipulation + # keep looking at detected human + giskardpy.move_head_to_human() + + # failure handling + rospy.Subscriber("nlp_feedback", Bool, talk_error) + + # receives name and drink via topic + rospy.Subscriber("nlp_out", String, talk_request) + + TalkingMotion("please follow me into the living room").resolve.perform() + + # Lead human to living room after introduction + giskardpy.stop_looking() + + NavigateAction([pose_kitchen_to_couch]).resolve().perform() + NavigateAction([pose_couch]).resolve().perform() + + TalkingMotion("take a seat, it was nice meeting you") + + rospy.loginfo("End of Demo") diff --git a/demos/pycram_receptionist_demo/__init__.py b/demos/pycram_receptionist_demo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/demos/pycram_receptionist/M1_Demo.py b/demos/pycram_receptionist_demo/deprecated/M1_Demo.py similarity index 100% rename from demos/pycram_receptionist/M1_Demo.py rename to demos/pycram_receptionist_demo/deprecated/M1_Demo.py diff --git a/demos/pycram_receptionist/talk_actions.py b/demos/pycram_receptionist_demo/deprecated/talk_actions.py similarity index 100% rename from demos/pycram_receptionist/talk_actions.py rename to demos/pycram_receptionist_demo/deprecated/talk_actions.py diff --git a/demos/pycram_receptionist_demo/fluent.py b/demos/pycram_receptionist_demo/fluent.py new file mode 100644 index 000000000..e69de29bb diff --git a/demos/pycram_receptionist/random.py b/demos/pycram_receptionist_demo/random.py similarity index 100% rename from demos/pycram_receptionist/random.py rename to demos/pycram_receptionist_demo/random.py diff --git a/demos/pycram_receptionist_demo/tests/Humanfluent.py b/demos/pycram_receptionist_demo/tests/Humanfluent.py new file mode 100644 index 000000000..44f21324f --- /dev/null +++ b/demos/pycram_receptionist_demo/tests/Humanfluent.py @@ -0,0 +1,35 @@ +import rospy + +from pycram.fluent import Fluent +from pycram.process_module import real_robot +from std_msgs.msg import String + + +class HumanDescription: + + def __init__(self, name, fav_drink): + # TODO: coordinate with Perception on what is easy to implement + # characteristics to consider: height, hair color, and age. 
+ self.human_pose = Fluent() + self.name = name + self.fav_drink = fav_drink # self.shirt_color = shirt_color # self.gender = gender + + self.human_pose_sub = rospy.Subscriber("/human_pose", String, self.human_pose_cb) + + def human_pose_cb(self, HumanPoseMsg): + self.human_pose.set_value(HumanPoseMsg.data) + + +with real_robot: + host = HumanDescription("Bob", fav_drink="Coffee") + #detect + # Perception, detectfirst guest + # If detect true then + + host.human_pose.set_value("True") + + # While loop, human is detected + while host.human_pose.get_value(): + print(host.human_pose.get_value()) + + rospy.loginfo("human not detected anymore") \ No newline at end of file diff --git a/demos/pycram_receptionist_demo/utils/misc.py b/demos/pycram_receptionist_demo/utils/misc.py new file mode 100644 index 000000000..815ff30c4 --- /dev/null +++ b/demos/pycram_receptionist_demo/utils/misc.py @@ -0,0 +1,48 @@ +import rospy +from pycram.designators.action_designator import NavigateAction +from pycram.designators.motion_designator import TalkingMotion +import pycram.external_interfaces.giskard as giskardpy +from pycram.designators.object_designator import * +from std_msgs.msg import String +from demos.pycram_receptionist_demo.deprecated import talk_actions +from deprecated import deprecated + + + +@deprecated(reason="Newst version uses the knowledge interface") +def talk_request(data: String): + """ + callback function that takes the data from nlp (name and drink) and lets the robot talk + :param data: String "name drink" + """ + + name_drink = data.data.split(" ") + talk_actions.name_drink_talker(name_drink) + rospy.loginfo("nlp data:" + name_drink[0] + " " + name_drink[1]) + + rospy.loginfo("stop looking now") + giskardpy.stop_looking() + rospy.loginfo("Navigating now") + NavigateAction([Pose([3, 5, 0], [0, 0, 1, 1])]).resolve().perform() + + +@deprecated(reason="Newst version uses the knowledge interface") +def talk_error(data): + """ + callback function if no name/drink was heard + """ + + error_msgs = "i could not hear you, please repeat" + TalkingMotion(error_msgs).resolve().perform() + pub_nlp.publish("start listening") + + +@deprecated(reason="Newst version uses the knowledge interface") +def introduce(name1, drink1, name2, drink2): + """ + Text for robot to introduce two people to each other + """ + first = "Hey" + name2 + " This is " + name1 + "and the favorite drink of your guest is " + drink1 + second = name1 + "This is " + name2 + "his favorite drink is " + drink2 + TalkingMotion(first) + TalkingMotion(second) diff --git a/demos/pycram_suturo/demo-test.py b/demos/pycram_suturo/demo-test.py deleted file mode 100644 index 22d02e1a3..000000000 --- a/demos/pycram_suturo/demo-test.py +++ /dev/null @@ -1,13 +0,0 @@ -from pycram.bullet_world import BulletWorld, Object -from pycram.enums import ObjectType -from pycram.external_interfaces import giskard -from pycram.process_module import simulated_robot, with_simulated_robot -from pycram.designators.action_designator import * -from pycram.enums import Arms -from pycram.designators.object_designator import * -from pycram.designators.object_designator import BelieveObject -import pycram.helper as helper - -world = BulletWorld() -robot = Object("pr2", "robot", "../../resources/" + robot_description.name + ".urdf") -giskard.achieve_joint_goal({"torso_lift_joint": 0.28}) \ No newline at end of file diff --git a/demos/pycram_suturo/hsr-pick-place-test.py b/demos/pycram_suturo/hsr-pick-place-test.py deleted file mode 100644 index 79c64a71f..000000000 --- 
a/demos/pycram_suturo/hsr-pick-place-test.py +++ /dev/null @@ -1,148 +0,0 @@ -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -from pycram.external_interfaces.robokudo import queryEmpty, queryHuman -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) -# human = Object("human", ObjectType.MILK, "human_male.stl", pose=Pose([0, 0, 0])) - -#apartment = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -# turn robot and some object for 90 degrees -object_orientation = axis_angle_to_quaternion([0, 0, 1], 90) -#host = Object("humanhost", ObjectType.HUMAN, "human_male.stl", pose=Pose([-2.5, 1.5, 0])) -#mug = Object("milk", "milk", "milk.stl", pose=Pose([-2.7, 2.5, 0.43]), color=[1, 0, 0, 1]) -cereal = Object("cereal", "cereal", "breakfast_cereal.stl", pose=Pose([-2.5, 1.5, 0.43], object_orientation), - color=[0, 1, 0, 1]) -# metalmug = Object("metalmug", "metalmug", "bowl.stl", pose=Pose([-2.9, 2.3, 0.43], object_orientation), color=[0, 1, 0, 1]) -# cheezels = Object("cheezels", "cheezels", "breakfast_cereal.stl", pose=Pose([-2.3, 2.3, 0.43], object_orientation), color=[0, 1, 0, 1]) -# pringles = Object("pringles", "pringles", "milk.stl", pose=Pose([-3.1, 2.3, 0.43]), color=[0, 1, 0, 1]) - -# -#milk_desig = BelieveObject(names=["milk"]) -cereal_desig = BelieveObject(names=["cereal"]) -# metalmug_desig = BelieveObject(names=["metalmug"]) -# cheezels_desig = BelieveObject(names=["cheezels"]) -# pringles_desig = BelieveObject(names=["pringles"]) - -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = BelieveObject(names=["hsr"]) -apartment_desig = BelieveObject(names=["kitchen"]) - -#object_list = [pringles_desig, metalmug_desig, milk_desig, cereal_desig, cheezels_desig] - - -# # initialize and sync giskard -giskardpy.init_giskard_interface() -#giskardpy.sync_worlds() -# -#RobotStateUpdater("/tf", "/joint_states") -# - -# @with_simulated_robot -def move_and_detect(obj): - # NavigateAction(target_locations=[Pose([-2.6, 0.8, 0], object_orientation)]).resolve().perform() - # LookAtAction(targets=[obj.pose]).resolve().perform() - #LookAtAction(targets=[obj.pose]).resolve().perform() - - # unkown_obj = BelieveObject(names=[ObjectType.MILK]) - # detected_objects= DetectingMotion(BelieveObject(types=[obj.type]), technique="all").resolve().perform() - # print(detected_objects) - # print("done") - # rospy.loginfo("detected objects: {}".format(detected_objects)) - object_desig = DetectAction(BelieveObject(types=[obj.type]), technique='all').resolve().perform() - #print(list(object_desig)) - print(list(object_desig)[0]) - # print(object_desig.values()[0].name) - return object_desig - - -with real_robot: - #query=queryHuman() - #print(query) - #human = DetectingMotion(object_type=ObjectType.SPOON, - # technique="human").resolve().perform() - #print(human) - #object_desig = DetectAction(BelieveObject(types=[cereal.type]), 
technique='human').resolve().perform() - TalkingMotion("test").resolve().perform() - # ParkArmsAction([Arms.LEFT]).resolve().perform() - dict_desig = move_and_detect(obj=cereal) - # MoveTorsoAction([0.25]).resolve().perform() - #PickUpAction(dict_desig['Crackerbox'], ["left"], ["top"]).resolve().perform() - print("starting mit giskard") - giskardpy.initial_adding_objects() - giskardpy.spawn_kitchen() - print("done") - #PickUpAction(dict_desig['Crackerbox'], ["left"], ["left"]).resolve().perform() - # ParkArmsAction([Arms.LEFT]).resolve().perform() - # PlaceAction(dict_desig['cereal'], [Pose([-2.3, 2.2, 0.43], object_orientation)], ["left"]).resolve().perform() - # MoveTorsoAction([0.25]).resolve().perform() - # ParkArmsAction([Arms.LEFT]).resolve().perform() - # #todo parkarms does not move the torso atm - # NavigateAction(target_locations=[Pose([-2.7, 0, 0.43], object_orientation)]).resolve().perform() - -# rospy.loginfo("milk pose: {}".format(milk_desig.pose)) -# -# cereal_desig = move_and_detect(obj=cereal) -# rospy.loginfo("cereal pose: {}".format(cereal_desig.pose)) -# -# milk_desig = move_and_detect(ObjectType.MILK) -# -# TransportAction(milk_desig, ["left"], [Pose([4.8, 3.55, 0.8])]).resolve().perform() -# -# cereal_desig = move_and_detect(ObjectType.BREAKFAST_CEREAL) -# -# TransportAction(cereal_desig, ["right"], [Pose([5.2, 3.4, 0.8], [0, 0, 1, 1])]).resolve().perform() -# -# bowl_desig = move_and_detect(ObjectType.BOWL) -# -# TransportAction(bowl_desig, ["left"], [Pose([5, 3.3, 0.8], [0, 0, 1, 1])]).resolve().perform() -# -# # Finding and navigating to the drawer holding the spoon -# handle_desig = ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) -# drawer_open_loc = AccessingLocation(handle_desig=handle_desig.resolve(), robot_desig=robot_desig.resolve()).resolve() -# -# NavigateAction([drawer_open_loc.pose]).resolve().perform() -# -# OpenAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() -# spoon.detach(apartment) -# -# # Detect and pickup the spoon -# LookAtAction([apartment.get_link_pose("handle_cab10_t")]).resolve().perform() -# -# spoon_desig = DetectAction(BelieveObject(types=[ObjectType.SPOON])).resolve().perform() -# -# pickup_arm = "left" if drawer_open_loc.arms[0] == "right" else "right" -# PickUpAction(spoon_desig, [pickup_arm], ["top"]).resolve().perform() -# -# ParkArmsAction([Arms.BOTH]).resolve().perform() -# -# close_loc = drawer_open_loc.pose -# close_loc.position.y += 0.1 -# NavigateAction([close_loc]).resolve().perform() -# -# CloseAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() -# -# ParkArmsAction([Arms.BOTH]).resolve().perform() -# -# MoveTorsoAction([0.15]).resolve().perform() -# -# # Find a pose to place the spoon, move and then place it -# spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) -# placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve()).resolve() -# -# NavigateAction([placing_loc.pose]).resolve().perform() -# -# PlaceAction(spoon_desig, [spoon_target_pose], [pickup_arm]).resolve().perform() -# -# ParkArmsAction([Arms.BOTH]).resolve().perform() diff --git a/demos/pycram_suturo/hsrtest.py b/demos/pycram_suturo/hsrtest.py deleted file mode 100644 index ae83b6014..000000000 --- a/demos/pycram_suturo/hsrtest.py +++ /dev/null @@ -1,102 +0,0 @@ -from pycram.process_module import simulated_robot, real_robot, semi_real_robot -from pycram.designators.action_designator import * -from 
pycram.enums import Arms -from pycram.designators.object_designator import * -from pycram.designators.object_designator import BelieveObject -import pycram.helper as helper -import pycram.external_interfaces.giskard as giskardpy -from pycram.fluent_language_misc import failure_handling -import threading -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -from pycram.plan_failures import TorsoFailure -import sys -#from pycram.ros.tf_broadcaster import TFBroadcaster - - -from pycram.ros.robot_state_updater import RobotStateUpdater -#from pycram.ros.joint_state_publisher import JointStatePublisher - -print(sys.meta_path) -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -#broadcaster = TFBroadcaster() -#joint_publisher = JointStatePublisher("joint_states", 0.1) - -world.set_gravity([0, 0, -9.8]) -robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() -robot.set_color([0.5, 0.5, 0.9, 1]) - -kitchen = Object("kitchen", "environment", "test-room.urdf") -robot.set_joint_state(robot_description.torso_joint, 0.24) -kitchen_desig = ObjectDesignatorDescription(names=["kitchen"]) -spawning_poses = { - # 'bigknife': Pose([-0.95, 1.2, 1.3], [1, -1, 1, -1]), - 'bigknife': Pose([0.9, 0.6, 0.5], [0, 0, 0, -1]), - # 'bread': Pose([-0.85, 0.9, 0.90], [0, 0, -1, 1]) - 'bread': Pose([-0.85, 0.9, 0.90], [0, 0, -1, -1]), - 'board': Pose([-0.85, 0.9, 0.85], [0, 0, -1, -1]), - 'cocumber': Pose([-0.85, 0.9, 0.87], [0, 0, -1, -1]) -} -bigknife = Object("bigknife", "bigknife", "big-knife.stl", spawning_poses["bigknife"]) -cocumber = Object("cocumber", "cocumber", "cocumber.stl", spawning_poses["cocumber"]) -board = Object("board", "board", "board.stl", spawning_poses["board"]) -cocumber.set_color([0, 1, 0.04, 1]) -board.set_color([0.4, 0.2, 0.06, 1]) -bigknife_BO = BelieveObject(names=["bigknife"]) -bread_BO = BelieveObject(names=["bread"]) -cocumber_BO = BelieveObject(names=["cocumber"]) -giskardpy.init_giskard_interface() -giskardpy.sync_worlds() -RobotStateUpdater("/tf", "/giskard_joint_states") - -# giskardpy.achieve_joint_goal({"torso_lift_joint": 0.28}) -import random - -# Example usage: -# @failure_handling(5) -# def some_function(): -# numerator = 10 -# denominator = random.choice([0]) # Randomly chooses 0, 1, or 2 -# result = numerator / denominator # This might raise a ZeroDivisionError -# print(f"Result is {result}") -# return result - - -def move_object(): - # Move to sink - with par as s: - # MoveTorsoAction([0.33]).resolve().perform() - print("test") - print("test1") - - -with semi_real_robot: - #some_function() - ParkArmsAction([Arms.LEFT]).resolve().perform() - - MoveTorsoAction([0.33]).resolve().perform() - giskardpy.sync_worlds() - grasp = robot_description.grasps.get_orientation_for_grasp("top") - arm = "left" - # print("pickuppose") - # pickup_pose_knife = CostmapLocation(target=bigknife_BO.resolve(), reachable_for=robot_desig).resolve() - # print("nav") - # pickup_arm = pickup_pose_knife.reachable_arms[0] - pipose = Pose([0.4, 0.6, 0], [0, 0, 0, -1]) - NavigateAction(target_locations=[pipose]).resolve().perform() - print("pickup") - PickUpAction(object_designator_description=bigknife_BO, - arms=["left"], - grasps=["top"]).resolve().perform() - print("park") - # - ParkArmsAction([Arms.LEFT]).resolve().perform() - original_quaternion = (0, 0, 0, 1) - rotation_axis = (0, 0, 1) - rotation_quaternion = helper.axis_angle_to_quaternion(rotation_axis, 180) - resulting_quaternion = 
helper.multiply_quaternions(original_quaternion, rotation_quaternion) - nav_pose = Pose([-0.3, 0.9, 0.0], resulting_quaternion) - NavigateAction(target_locations=[nav_pose]).resolve().perform() - LookAtAction(targets=[cocumber_BO.resolve().pose]).resolve().perform() diff --git a/demos/pycram_suturo/oritest.py b/demos/pycram_suturo/oritest.py deleted file mode 100644 index 86b52e9d7..000000000 --- a/demos/pycram_suturo/oritest.py +++ /dev/null @@ -1,59 +0,0 @@ -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -from pycram.external_interfaces.robokudo import queryEmpty, queryHuman -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) -# human = Object("human", ObjectType.MILK, "human_male.stl", pose=Pose([0, 0, 0])) - -apartment = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -# turn robot and some object for 90 degrees -object_orientation = axis_angle_to_quaternion([0, 0, 1], 90) -cereal = Object("cereal", "cereal", "breakfast_cereal.stl", pose=Pose([-2.5, 1.5, 0.43]), - color=[0, 1, 0, 1]) - -# -cereal_desig = BelieveObject(names=["cereal"]) - -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = BelieveObject(names=["hsr"]) -apartment_desig = BelieveObject(names=["kitchen"]) - -giskardpy.init_giskard_interface() -#todo make a function that removes giskard stuff without bullet -#giskardpy.removing_of_objects() -#giskardpy.sync_worlds() - -RobotStateUpdater("/tf", "/joint_states") - - - -with semi_real_robot: - print("starting mit giskard") - lt = LocalTransformer() - #NavigateAction(target_locations=[Pose([0, 0, 0])]).resolve().perform() - gripper_name = robot_description.get_tool_frame("left") - posetest = Pose([0, 0, 0], [0, 0, 0, 1]) - gri = BulletWorld.robot.get_link_tf_frame(gripper_name) - base_name = robot.get_link_tf_frame("base_link") - while True: - print(lt.lookupTransform(BulletWorld.robot.get_link_tf_frame(gripper_name),"map", rospy.Time(0))) - - #print(lt.transform_pose(posetest, gri)) - # print(robot.get_link_pose(gripper_name)) - # for i in robot.get_complete_joint_state(): - # print(i) - - #print(robot.get_complete_joint_state()) - #print(robot.get_joint_state("hand_palm_joint")) diff --git a/demos/pycram_suturo/real-pick.py b/demos/pycram_suturo/real-pick.py deleted file mode 100644 index 057646c1b..000000000 --- a/demos/pycram_suturo/real-pick.py +++ /dev/null @@ -1,106 +0,0 @@ -import rospy -from tmc_control_msgs.msg import GripperApplyEffortActionGoal - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from 
pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -from pycram.external_interfaces.robokudo import queryEmpty, queryHuman - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) -# human = Object("human", ObjectType.MILK, "human_male.stl", pose=Pose([0, 0, 0])) - -# apartment = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") -# turn robot and some object for 90 degrees -giskardpy.init_giskard_interface() -# giskardpy.sync_worlds() - -object_orientation = axis_angle_to_quaternion([0, 0, 1], 90) -cereal = Object("cereal", "cereal", "breakfast_cereal.stl", pose=Pose([0, 0, 0]), - color=[0, 1, 0, 1]) - -cereal_desig = BelieveObject(names=["cereal"]) - -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = BelieveObject(names=["hsr"]) -apartment_desig = BelieveObject(names=["kitchen"]) - -# todo make a function that removes giskard stuff without bullet -# giskardpy.removing_of_objects() - -RobotStateUpdater("/tf", "/giskard_joint_states") - - -def move_and_detect(obj): - # NavigateAction(target_locations=[Pose([-2.6, 0.8, 0], object_orientation)]).resolve().perform() - # LookAtAction(targets=[obj.pose]).resolve().perform() - # LookAtAction(targets=[obj.pose]).resolve().perform() - object_desig = DetectAction(BelieveObject(types=[obj.type]), technique='all').resolve().perform() - - return object_desig - - -with real_robot: - TalkingMotion("Starting Demo").resolve().perform() - ParkArmsAction([Arms.LEFT]).resolve().perform() - NavigateAction(target_locations=[Pose([3.9, 2.2, 0])]).resolve().perform() - MoveTorsoAction([0.25]).resolve().perform() - LookAtAction(targets=[Pose([4.9, 2.2, 0.18])]).resolve().perform() - - - def open_gripper(): - pub_nlp = rospy.Publisher('/hsrb/gripper_controller/grasp/goal', GripperApplyEffortActionGoal, - queue_size=10) - rate = rospy.Rate(10) # 10hz - rospy.sleep(2) - msg = GripperApplyEffortActionGoal() - msg.goal.effort = 0.8 - pub_nlp.publish(msg) - - - def close_gripper(): - pub_nlp = rospy.Publisher('/hsrb/gripper_controller/grasp/goal', GripperApplyEffortActionGoal, - queue_size=10) - rate = rospy.Rate(10) # 10hz - rospy.sleep(2) - msg = GripperApplyEffortActionGoal() - msg.goal.effort = -0.8 - pub_nlp.publish(msg) - - - - TalkingMotion("Perceiving Now").resolve().perform() - object_desig = DetectAction(BelieveObject(types=[cereal.type]), technique='all').resolve().perform() - - obj_list = [object_desig['Cerealbox'], object_desig['Pringleschipscan'], object_desig['Milkpack'], - object_desig['Crackerbox'], object_desig['Metalmug']] - - giskardpy.initial_adding_objects() - giskardpy.spawn_kitchen() - LookAtAction(targets=[Pose([4.9, 2.2, 1.1])]).resolve().perform() - - for obj in obj_list: - new_pose = obj.pose - TalkingMotion("Navigating now").resolve().perform() - NavigateAction(target_locations=[Pose([4.2, new_pose.position.y, 0])]).resolve().perform() - - TalkingMotion("Calling Giskard now").resolve().perform() - PickUpAction(obj, ["left"], ["front"]).resolve().perform() - - TalkingMotion("Picked up Dropping in 3").resolve().perform() - TalkingMotion("2").resolve().perform() - TalkingMotion("1").resolve().perform() - open_gripper() - rospy.sleep(5) - ParkArmsAction([Arms.LEFT]).resolve().perform() - - TalkingMotion("I am done").resolve().perform() diff --git a/demos/pycram_suturo/run.py b/demos/pycram_suturo/run.py deleted file mode 100644 index 
965ebf303..000000000 --- a/demos/pycram_suturo/run.py +++ /dev/null @@ -1,5 +0,0 @@ -import sys -import os -sys.path.append(os.getcwd() + "/../../src/") -import macropy.activate -import hsrtest \ No newline at end of file diff --git a/demos/pycram_suturo/semi-real-pick-test.py b/demos/pycram_suturo/semi-real-pick-test.py deleted file mode 100644 index bc320434b..000000000 --- a/demos/pycram_suturo/semi-real-pick-test.py +++ /dev/null @@ -1,70 +0,0 @@ -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot, real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy -from pycram.external_interfaces.robokudo import queryEmpty, queryHuman -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) - -apartment = Object("kitchen", ObjectType.ENVIRONMENT, "test-room.urdf") -giskardpy.init_giskard_interface() -giskardpy.sync_worlds() - -object_orientation = axis_angle_to_quaternion([0, 0, 1], 90) -cereal = Object("cereal", "cereal", "breakfast_cereal.stl", pose=Pose([-2.5, 1.5, 0.43]), - color=[0, 1, 0, 1]) - -# -cereal_desig = BelieveObject(names=["cereal"]) - -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = BelieveObject(names=["hsr"]) -apartment_desig = BelieveObject(names=["kitchen"]) - - -#todo make a function that removes giskard stuff without bullet -#giskardpy.removing_of_objects() - -#we need to make this work with the real robot otherwise pick up action will not work -RobotStateUpdater("/tf", "/giskard_joint_states") - -def move_and_detect(obj): - NavigateAction(target_locations=[Pose([-2.6, 0.8, 0], object_orientation)]).resolve().perform() - LookAtAction(targets=[obj.pose]).resolve().perform() - LookAtAction(targets=[obj.pose]).resolve().perform() - object_desig = DetectAction(BelieveObject(types=[obj.type]), technique='default').resolve().perform() - - return object_desig - - -with semi_real_robot: - cereal_target_pose = Pose([-2.5, 2.3, 0.43], [0, 0, 1, 1]) - TalkingMotion("test").resolve().perform() - ParkArmsAction([Arms.LEFT]).resolve().perform() - dict_desig = move_and_detect(obj=cereal) - #giskardpy.initial_adding_objects() - #giskardpy.spawn_kitchen() - ParkArmsAction([Arms.LEFT]).resolve().perform() - TalkingMotion("Trying to pick up").resolve().perform() - PickUpAction((dict_desig["cereal"]), ["left"], ["top"]).resolve().perform() - ParkArmsAction([Arms.LEFT]).resolve().perform() - #NavigateAction(target_locations=[cereal_target_pose]).resolve().perform() - #PlaceAction(dict_desig['cereal'], [Pose(-2.7, 2.3, 0.43)], ["left"]).resolve().perform() - - - PlaceAction(dict_desig["cereal"], [cereal_target_pose], ["left"]).resolve().perform() - giskardpy.remove_object(dict_desig["cereal"].bullet_world_object) - ParkArmsAction([Arms.LEFT]).resolve().perform() - - print("done") - # #todo parkarms does not move the torso atm diff --git a/demos/pycram_suturo/test.py b/demos/pycram_suturo/test.py deleted file mode 100644 index 846d3d396..000000000 --- a/demos/pycram_suturo/test.py +++ /dev/null @@ -1,19 
+0,0 @@ -import rospy - -from pycram.designators.action_designator import * -from pycram.designators.location_designator import * -from pycram.designators.object_designator import * -from pycram.designators.motion_designator import * -from pycram.pose import Pose -from pycram.bullet_world import BulletWorld, Object -from pycram.process_module import simulated_robot, with_simulated_robot, semi_real_robot -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -import pycram.external_interfaces.giskard as giskardpy - - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() -robot = Object("hsrb", ObjectType.ROBOT, "hsrb.urdf", pose=Pose([1, 2, 0])) -apartment = Object("kitchen", ObjectType.ENVIRONMENT, "kitchen.urdf") \ No newline at end of file diff --git a/demos/pycram_test_demos/move_gripper_test.py b/demos/pycram_test_demos/move_gripper_test.py deleted file mode 100755 index ec2a429ec..000000000 --- a/demos/pycram_test_demos/move_gripper_test.py +++ /dev/null @@ -1,14 +0,0 @@ -import rospy -import pycram.external_interfaces.giskard as giskardpy -from pycram.designators.action_designator import * -from pycram.process_module import real_robot - -rospy.loginfo("init_interface") -giskardpy.init_giskard_interface() - -with real_robot: - MoveGripperMotion("open", "left", allow_gripper_collision=True).resolve().perform() - time.sleep(2) - MoveGripperMotion("neutral", "left", allow_gripper_collision=True).resolve().perform() - time.sleep(2) - MoveGripperMotion("close", "left", allow_gripper_collision=True).resolve().perform() diff --git a/demos/pycram_test_demos/navigate_action_test.py b/demos/pycram_test_demos/navigate_action_test.py deleted file mode 100644 index 8b3b5396b..000000000 --- a/demos/pycram_test_demos/navigate_action_test.py +++ /dev/null @@ -1,33 +0,0 @@ -from pycram.process_module import real_robot -from pycram.designators.action_designator import * -import pycram.external_interfaces.giskard as giskardpy -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -from pycram.designators.object_designator import * -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -#07.12.23 worked with.... - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -world.set_gravity([0, 0, -9.8]) - - -# Initialize objects in BulletWorld -# TODO: Warum ist die Orientierung vom falschen Datentyp? Kann ich das ignorieren? 
-robot = Object("hsrb", ObjectType.ROBOT, "../../resources/" + "hsrb" + ".urdf") -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() -kitchen = Object("kitchen", "environment", "kitchen.urdf") -robot.set_joint_state(robot_description.torso_joint, 0.24) -kitchen_desig = ObjectDesignatorDescription(names=["kitchen"]) - - -# Giskard initialisieren und syncen -giskardpy.init_giskard_interface() -#giskardpy.sync_worlds() - -#RobotStateUpdater("/tf", "/joint_states") - -with real_robot: - NavigateAction(target_locations=[Pose([3.7, 1.4, 0])]).resolve().perform() diff --git a/demos/pycram_test_demos/pick_up_test.py b/demos/pycram_test_demos/pick_up_test.py deleted file mode 100755 index 622190e5a..000000000 --- a/demos/pycram_test_demos/pick_up_test.py +++ /dev/null @@ -1,33 +0,0 @@ -from pycram.process_module import real_robot -from pycram.designators.action_designator import * -import pycram.external_interfaces.giskard as giskardpy -from pycram.designators.object_designator import * -from pycram.enums import ObjectType -from pycram.ros.viz_marker_publisher import VizMarkerPublisher - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -#broadcaster = TFBroadcaster() -#joint_publisher = JointStatePublisher("joint_states", 0.1) - -world.set_gravity([0, 0, -9.8]) -robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() -robot.set_color([0.5, 0.5, 0.9, 1]) -object_mug = BelieveObject(names=["metalmug"]) -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02]), color=[1, 0, 0, 1]) - -# Giskard initialisieren -giskardpy.init_giskard_interface() - -with real_robot: - object_desig = DetectAction(BelieveObject(types=[ObjectType.MILK]), technique='all').resolve().perform() - - PickUpAction(object_designator_description=object_desig["Cerealbox"], - arms=["left"], - grasps=["front"]).resolve().perform() - - # PickUpAction(object_designator_description=object_desig, - # arms=["left"], - # grasps=["top"]).resolve().perform() diff --git a/demos/pycram_test_demos/pick_up_test_one_object.py b/demos/pycram_test_demos/pick_up_test_one_object.py deleted file mode 100755 index d218bacf8..000000000 --- a/demos/pycram_test_demos/pick_up_test_one_object.py +++ /dev/null @@ -1,72 +0,0 @@ -import rospy -from tmc_msgs.msg import Voice - -from pycram import pose -from pycram.external_interfaces import robokudo -from pycram.process_module import real_robot, semi_real_robot, with_real_robot -from pycram.designators.action_designator import * -import pycram.external_interfaces.giskard as giskardpy -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -from pycram.designators.object_designator import * -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -world.set_gravity([0, 0, -9.8]) - -robo_orientation = axis_angle_to_quaternion([0, 0, 1], 90) -x = robo_orientation[0] -y = robo_orientation[1] -z = robo_orientation[2] -w = robo_orientation[3] - -cereal_orientation = axis_angle_to_quaternion([0, 0, 1], 90) - -# Initialize objects in BulletWorld -# TODO: Warum ist die Orientierung vom falschen Datentyp? Kann ich das ignorieren? 
-robot = Object("hsrb", ObjectType.ROBOT, "../../resources/" + "hsrb" + ".urdf") -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() - -kitchen = Object("kitchen", "environment", "kitchen.urdf") -kitchen_desig = ObjectDesignatorDescription(names=["kitchen"]) - -milk = Object("milk", "milk", "milk.stl", pose=Pose([-2.7, 2.3, 0.43]), color=[1, 0, 0, 1]) -milk_desig = BelieveObject(types=["milk"]) - - -pub = rospy.Publisher('/talk_request', Voice, queue_size=10) -drop_str = Voice() -drop_str.language = 1 -drop_str.sentence = "I drop the object now!" - -# Giskard initialisieren und syncen -giskardpy.init_giskard_interface() -giskardpy.sync_worlds() - -RobotStateUpdater("/tf", "/joint_states") - -with semi_real_robot: - ParkArmsAction([Arms.LEFT]).resolve().perform() - #LookAtAction(targets=[Pose([-2.6, 2.0, 0])]).resolve().perform() - - NavigateAction(target_locations=[Pose([-2.6, 0, 0], robo_orientation)]).resolve().perform() - NavigateAction(target_locations=[Pose([-2.6, 1.5, 0], robo_orientation)]).resolve().perform() - - pick_pose = Pose([-2.6, 2.0, 0.8]) - LookAtAction(targets=[pick_pose]).resolve().perform() - object_desig = DetectAction(milk_desig).resolve().perform() - - PickUpAction(object_designator_description=object_desig, - arms=["left"], - grasps=["front"]).resolve().perform() - print(object_desig.pose) - - pub.publish(drop_str) - new_pose = object_desig.pose - place_pose = Pose([new_pose.position.x, new_pose.position.y + 0.3, new_pose.position.z]) - PlaceAction(object_desig, [place_pose], ["left"]).resolve().perform() - print("place finished") -# MoveGripperMotion("open", "left", allow_gripper_collision=True).resolve().perform() diff --git a/demos/pycram_test_demos/place_test_demo.py b/demos/pycram_test_demos/place_test_demo.py deleted file mode 100644 index 0ad1b7112..000000000 --- a/demos/pycram_test_demos/place_test_demo.py +++ /dev/null @@ -1,66 +0,0 @@ -from pycram.process_module import real_robot -from pycram.designators.action_designator import * -import pycram.external_interfaces.giskard as giskardpy -from pycram.designators.object_designator import * -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater -from pycram.ros.viz_marker_publisher import VizMarkerPublisher - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -#broadcaster = TFBroadcaster() -#joint_publisher = JointStatePublisher("joint_states", 0.1) - -world.set_gravity([0, 0, -9.8]) -robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() -robot.set_color([0.5, 0.5, 0.9, 1]) -robot_pose= robot.get_pose() -object_mug = BelieveObject(names=["metalmug"]) -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02]), color=[1, 0, 0, 1]) - -# Giskard initialisieren -giskardpy.init_giskard_interface() -RobotStateUpdater("/tf", "/giskard_joint_states") - -def try_pick_up(obj, grasps): - try: - PickUpAction(obj, ["left"], [grasps]).resolve().perform() - except (EnvironmentUnreachable, GripperClosedCompletely): - print("try pick up again") - TalkingMotion("Try pick up again") - NavigateAction([Pose([robot_pose.position.x - 0.3, robot_pose.position.y, robot_pose.position.z], - robot_pose.orientation)]).resolve().perform() - ParkArmsAction([Arms.LEFT]).resolve().perform() - if EnvironmentUnreachable: - object_desig = DetectAction(BelieveObject(types=[ObjectType.MILK]), 
technique='all').resolve().perform() - # TODO nur wenn key (name des vorherigen objektes) in object_desig enthalten ist - new_object = object_desig[obj.name] - else: - new_object = obj - try: - PickUpAction(new_object, ["left"], [grasps]).resolve().perform() - - except: - TalkingMotion(f"Can you pleas give me the {obj.name} object on the table? Thanks") - TalkingMotion(f"Please push down my hand, when I can grab the {obj.name}.") - - -with real_robot: - ParkArmsAction([Arms.LEFT]).resolve().perform() - #object_desig = DetectAction(BelieveObject(types=[ObjectType.METALMUG])).resolve().perform() - object_desig = DetectAction(BelieveObject(types=[ObjectType.MILK]), technique='all').resolve().perform() - # PickUpAction(object_designator_description=object_desig, - # arms=["left"], - # grasps=["top"]).resolve().perform() - giskardpy.initial_adding_objects() - giskardpy.spawn_kitchen() - - #try_pick_up(object_desig["Cerealbox"], "front") - #todo: macht avoid collision oder allow ueberhaupt was? muessten wir testen - #giskardpy.place_objects(object_desig["Cerealbox"],[Pose([4.82,1.8,0.73])]) - #todo: in dem designator noch pose bearbeiten jenachdem welches object_desig reinkommt bei cereal großen z bei spoon minimal z - # und atm ist nur front place drin neues argument how to place - - PlaceAction(object_desig["Cerealbox"], [Pose([4.92,1.8,0.74])], ["left"], ["front"]).resolve().perform() diff --git a/demos/pycram_test_demos/robokudo_test.py b/demos/pycram_test_demos/robokudo_test.py deleted file mode 100644 index cedc6977d..000000000 --- a/demos/pycram_test_demos/robokudo_test.py +++ /dev/null @@ -1,28 +0,0 @@ -from pycram import pose -from pycram.external_interfaces import robokudo -from pycram.process_module import real_robot, semi_real_robot -from pycram.designators.action_designator import * -import pycram.external_interfaces.giskard as giskardpy -from pycram.ros.viz_marker_publisher import VizMarkerPublisher -from pycram.designators.object_designator import * -from pycram.enums import ObjectType -from pycram.ros.robot_state_updater import RobotStateUpdater - -world = BulletWorld("DIRECT") -v = VizMarkerPublisher() - -#broadcaster = TFBroadcaster() -#joint_publisher = JointStatePublisher("joint_states", 0.1) - -world.set_gravity([0, 0, -9.8]) -robot = Object("hsrb", "robot", "../../resources/" + robot_description.name + ".urdf") -robot_desig = ObjectDesignatorDescription(names=["hsrb"]).resolve() -robot.set_color([0.5, 0.5, 0.9, 1]) -object_mug = BelieveObject(names=["metalmug"]) -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02]), color=[1, 0, 0, 1]) - - -with real_robot: - #todo we dont want to insert a object at all time - object_desig = DetectAction(BelieveObject(types=[ObjectType.MILK])).resolve().perform() - print("object desig is created") \ No newline at end of file diff --git a/demos/util-scripts/__init__.py b/demos/util-scripts/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/demos/pycram_suturo/save-urdf.py b/demos/util-scripts/save-urdf.py similarity index 100% rename from demos/pycram_suturo/save-urdf.py rename to demos/util-scripts/save-urdf.py diff --git a/src/pycram/designators/action_designator.py b/src/pycram/designators/action_designator.py index fcbb2b8dc..ba1f9a071 100644 --- a/src/pycram/designators/action_designator.py +++ b/src/pycram/designators/action_designator.py @@ -274,7 +274,11 @@ def ground(self) -> Action: class PickUpAction(ActionDesignatorDescription): """ - Designator to let the robot pick up an object. 
+    A class representing a designator for a pick-up action, allowing a robot to pick up a specified object.
+
+    This class encapsulates the details of the pick-up action, including the object to be picked up, the arm to be used,
+    and the grasp type. It defines the sequence of operations for the robot to execute the pick-up action, such as opening
+    the gripper, moving to the object, closing the gripper, and lifting the object.
     """

     @dataclasses.dataclass
@@ -292,37 +296,57 @@ class Action(ActionDesignatorDescription.Action):
         grasp: str
         """
-        The grasp that should be used. For example, 'left' or 'right'
+        The grasp that should be used. For example, 'top' or 'front'
         """

         @with_tree
         def perform(self) -> None:
+            # Initialize the local transformer and robot reference
+            lt = LocalTransformer()
             robot = BulletWorld.robot
             # Retrieve object and robot from designators
             object = self.object_designator.bullet_world_object
-            # oTm = Object Pose in Frame map
+            # Calculate the object's pose in the map frame
             oTm = object.get_pose()
             execute = True
+
+            # Adjust object pose for top-grasping, if applicable
             if self.grasp == "top":
+                # Handle special cases for certain object types (e.g., Cutlery, Bowl)
+                # Note: This includes hardcoded adjustments and should ideally be generalized
                 if self.object_designator.type == "Cutlery":
+                    # todo: this z is the popcorn-table height; we need a location designator that provides
+                    #  this z value instead of hardcoding it
                     oTm.pose.position.z = 0.718
                 oTm.pose.position.z += 0.04
+            # Determine the grasp orientation and transform the pose to the base link frame
             grasp_rotation = robot_description.grasps.get_orientation_for_grasp(self.grasp)
             oTb = lt.transform_pose(oTm, robot.get_link_tf_frame("base_link"))
+            # Set pose to the grasp rotation
             oTb.orientation = grasp_rotation
+            # Transform the pose to the map frame
             oTmG = lt.transform_pose(oTb, "map")
+            # Open the gripper before picking up the object
             rospy.logwarn("Opening Gripper")
             MoveGripperMotion(motion="open", gripper=self.arm).resolve().perform()
-            rospy.logwarn("Picking now")
+
+            # Move to the pre-grasp position and visualize the action
+            rospy.logwarn("Picking up now")
             BulletWorld.current_bullet_world.add_vis_axis(oTmG)
+            # Execution flag: sometimes you only want to visualize the poses for testing
             if execute:
                 MoveTCPMotion(oTmG, self.arm).resolve().perform()
+            # Calculate and apply any special knowledge offsets based on the robot and object type
+            # Note: This currently includes robot-specific logic that should be generalized
             tool_frame = robot_description.get_tool_frame(self.arm)
             special_knowledge_offset = lt.transform_pose(oTmG, robot.get_link_tf_frame(tool_frame))
+
+            # todo: this is for the hsrb only at the moment; we will need a function that returns special
+            #  knowledge depending on the robot
             if robot.name == "hsrb":
                 if self.grasp == "top":
                     if self.object_designator.type == "Bowl":
@@ -334,8 +358,9 @@ def perform(self) -> None:
                         special_knowledge_offset.pose.position.x -= 0.11
                 print(special_knowledge_offset.pose.position.x)
-
             push_base = special_knowledge_offset
+            # todo: this is for the hsrb only at the moment; we will need a function that returns special
+            #  knowledge depending on the robot, otherwise we will end up with a big list of all robots here
             if robot.name == "hsrb":
                 z = 0.03
                 if self.grasp == "top":
@@ -343,10 +368,11 @@ def perform(self) -> None:
                     if self.object_designator.type == "Bowl":
                         z = 0.07
             push_base.pose.position.z += z
-            # todo: make this for other robots
             push_baseTm = lt.transform_pose(push_base, "map")
             special_knowledge_offsetTm = lt.transform_pose(push_base, "map")
+            # Grasping from the top inherently requires calculating an offset, whereas front grasping involves
+            # slightly pushing the object forward.
             if self.grasp == "top":
                 rospy.logwarn("Offset now")
                 BulletWorld.current_bullet_world.add_vis_axis(special_knowledge_offsetTm)
@@ -358,6 +384,7 @@ def perform(self) -> None:
             if execute:
                 MoveTCPMotion(push_baseTm, self.arm).resolve().perform()
+            # Finalize the pick-up by closing the gripper and lifting the object
             rospy.logwarn("Close Gripper")
             MoveGripperMotion(motion="close", gripper=self.arm).resolve().perform()
@@ -461,8 +488,7 @@ def perform(self) -> None:
                 MoveTCPMotion(oTmG, self.arm).resolve().perform()
             tool_frame = robot_description.get_tool_frame(self.arm)
-            special_knowledge_offset = lt.transform_pose(oTmG, robot.get_link_tf_frame(tool_frame))
-            push_base = special_knowledge_offset
+            push_base = lt.transform_pose(oTmG, robot.get_link_tf_frame(tool_frame))
             if robot.name == "hsrb":
                 z = 0.03
                 if self.grasp == "top":
@@ -470,7 +496,6 @@ def perform(self) -> None:
             push_base.pose.position.z += z
             # todo: make this for other robots
             push_baseTm = lt.transform_pose(push_base, "map")
-            special_knowledge_offsetTm = lt.transform_pose(push_base, "map")
             rospy.logwarn("Pushing now")
             MoveTCPMotion(push_baseTm, self.arm).resolve().perform()
@@ -731,10 +756,7 @@ def ground(self) -> Action:
 class DetectAction(ActionDesignatorDescription):
     """
-    Detects an object that fits the object description and returns a list of an object designator describing the object.
-    If you choose technique=all, it will return a list of all objects in the field of view.
-    returns a dict of perceived_objects. You can enter it by either perceived_object_dict["cereal"].name or
-    perceived_object_dict.values())[0].name..
+    Detects an object with the given technique.
     """

     @dataclasses.dataclass
@@ -750,9 +772,19 @@ class Action(ActionDesignatorDescription.Action):
         Object designator loosely describing the object, e.g. only type.
         """

+        state: Optional[str] = None
+        """
+        The state instructs our perception system to either start or stop the search for an object or human.
+        """
+
         @with_tree
         def perform(self) -> Any:
-            return DetectingMotion(technique=self.technique).resolve().perform()
+            if self.object_designator:
+                object_type = self.object_designator.type
+            else:
+                object_type = None
+            return DetectingMotion(technique=self.technique, object_type=object_type,
+                                   state=self.state).resolve().perform()

         def to_sql(self) -> ORMDetectAction:
             return ORMDetectAction()
@@ -769,16 +801,20 @@ def insert(self, session: sqlalchemy.orm.session.Session, *args, **kwargs) -> OR
             return action

     def __init__(self, technique, resolver=None,
-                 object_designator_description: Optional[ObjectDesignatorDescription] = None):
+                 object_designator_description: Optional[ObjectDesignatorDescription] = None,
+                 state: Optional[str] = None):
         """
         Tries to detect an object in the field of view (FOV) of the robot.
+
+        :param technique: The technique that should be used for detection, e.g. 'color', 'shape', etc.
         :param object_designator_description: Object designator describing the object
+        :param state: The state instructs our perception system to either start or stop the search for an object or human.
         :param resolver: An alternative resolver
         """
         super().__init__(resolver)
         self.technique: str = technique
         self.object_designator_description: Optional[ObjectDesignatorDescription] = object_designator_description
+        self.state: Optional[str] = state

     def ground(self) -> Action:
         """
diff --git a/src/pycram/designators/motion_designator.py b/src/pycram/designators/motion_designator.py
index 95902589d..105cf19b5 100644
--- a/src/pycram/designators/motion_designator.py
+++ b/src/pycram/designators/motion_designator.py
@@ -148,7 +148,7 @@ class Motion(MotionDesignatorDescription.Motion):
         """
         Arm that is currently holding the object
         """
-        grasp : str
+        grasp: str

         @with_tree
         def perform(self):
@@ -384,33 +384,34 @@ class Motion(MotionDesignatorDescription.Motion):
         Type of the object that should be detected
         """

+        state: Optional[str] = None
+        """
+        The state instructs our perception system to either start or stop the search for an object or human.
+        """
+
         def perform(self):
             pm_manager = ProcessModuleManager.get_manager()
             bullet_world_objects = pm_manager.detecting().execute(self)
-            #return bullet_world_objects
             if not bullet_world_objects:
-                raise PerceptionObjectNotFound(
-                    f"Could not find an object with the type {self.object_type} in the FOV of the robot")
+                raise PerceptionObjectNotFound(
+                    f"Could not find an object with the type {self.object_type} in the FOV of the robot")
             return bullet_world_objects
-        # if ProcessModuleManager.execution_type == "real":
-        #     return RealObject.Object(bullet_world_object.name, bullet_world_object.type,
-        #                              bullet_world_object, bullet_world_object.get_pose())
-        #
-        # return ObjectDesignatorDescription.Object(bullet_world_object.name, bullet_world_object.type,
-        #                                           bullet_world_object)

-    def __init__(self, technique: str, resolver: Optional[Callable] = None, object_type: Optional[str] = None):
+    def __init__(self, technique: str, resolver: Optional[Callable] = None, object_type: Optional[str] = None,
+                 state: Optional[str] = None):
         """
-        Checks for every object in the FOV of the robot if it fits the given object type. If the types match an object
-        designator describing the object will be returned.
+        Detects an object in the FOV of the robot. If an object type is given, the robot searches specifically for an
+        object of that type.
+
+        :param technique: The technique that should be used for detecting, e.g. 'color', 'shape', 'all', etc.
         :param object_type: Type of the object which should be detected
+        :param state: The state instructs our perception system to either start or stop the search for an object or human.
         :param resolver: An alternative resolver which returns a resolved motion designator
         """
         super().__init__(resolver)
         self.cmd: str = 'detecting'
         self.technique: str = technique
         self.object_type: Optional[str] = object_type
+        self.state: Optional[str] = state

     def ground(self) -> Motion:
         """
@@ -418,7 +419,7 @@ def ground(self) -> Motion:
         :return: A resolved motion designator
         """
-        return self.Motion(self.cmd, self.object_type, self.technique)
+        return self.Motion(self.cmd, self.technique, self.object_type, self.state)

 class MoveArmJointsMotion(MotionDesignatorDescription):
@@ -709,7 +710,7 @@ def perform(self):
             pm_manager = ProcessModuleManager.get_manager()
             return pm_manager.talk().execute(self)

-    def __init__(self, cmd: str, resolver: Optional[Callable] = None):
+    def __init__(self, cmd: str, resolver: Optional[Callable] = None):
         """
         Lets the robot close a container specified by the given parameter.
         This assumes that the handle is already grasped
diff --git a/src/pycram/external_interfaces/knowrob.py b/src/pycram/external_interfaces/knowrob.py
index de3a4c7af..b8288c449 100644
--- a/src/pycram/external_interfaces/knowrob.py
+++ b/src/pycram/external_interfaces/knowrob.py
@@ -1,10 +1,11 @@
 import logging
 import os
 import sys
-
+from std_srvs.srv import IsKnown
 import rospy
 import rosservice
+
 from typing import Dict, List, Union

 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
@@ -127,19 +128,21 @@ def knowrob_string_to_pose(pose_as_string: str) -> List[float]:
     qxyzw = list(map(float, ori.split(",")))
     return xyz + qxyzw

-def get_guest_info(id: int):
+def get_guest_info(id):
     """
     function that uses Knowledge Service to get Name and drink from new guest via ID
     """
-    #TODO: Service für Name und Getränk mit ID einbauen, wenn dieser fertig
     rospy.wait_for_service('name_server')
     try:
-        #TODO: Service class herausfinden
-        info_service = rospy.ServiceProxy('name_server', List)
-        guest_data = info_service(id) #guest_data = List der Form ["name", "drink"]
-        drink = info_service(id)
-        return guest_data
+        info_service = rospy.ServiceProxy('name_server', IsKnown)
+        guest_data = info_service(id)  # guest_data = "name, drink"
+        result = guest_data.split(',')  # result = ["name", " drink"]
+        return result
     except rospy.ServiceException as e:
         print("Service call failed")
+
+
+
+
diff --git a/src/pycram/external_interfaces/robokudo.py b/src/pycram/external_interfaces/robokudo.py
index f4dcf7080..05d403bbb 100644
--- a/src/pycram/external_interfaces/robokudo.py
+++ b/src/pycram/external_interfaces/robokudo.py
@@ -189,6 +189,20 @@ def listener():
     listener()
     while not human_bool:
         rospy.loginfo_throttle(3, "Waiting for human to be detected")
+        # TODO: Have the robot speak here? e.g. "please step in front of me"
         pass
-    return human_pose
\ No newline at end of file
+    return human_pose
+
+def stop_queryHuman() -> Any:
+    """
+    Sends a query to RoboKudo to stop human detection
+    """
+    init_robokudo_interface()
+    from robokudo_msgs.msg import QueryAction
+
+    client = actionlib.SimpleActionClient('robokudo/query', QueryAction)
+    rospy.loginfo("Waiting for action server")
+    client.wait_for_server()
+    client.cancel_goal()
+    rospy.loginfo("cancelled current goal")
diff --git a/src/pycram/process_modules/hsrb_process_modules.py b/src/pycram/process_modules/hsrb_process_modules.py
index 7f1a189c2..140c1fb65 100644
--- a/src/pycram/process_modules/hsrb_process_modules.py
+++ b/src/pycram/process_modules/hsrb_process_modules.py
@@ -12,7 +12,7 @@
 from ..enums import JointType, ObjectType
 from ..external_interfaces import giskard
 from ..external_interfaces.ik import request_ik
-from ..external_interfaces.robokudo import queryEmpty, queryHuman
+from ..external_interfaces.robokudo import queryEmpty, queryHuman, stop_queryHuman
 from ..helper import _apply_ik
 from ..local_transformer import LocalTransformer
 from ..external_interfaces.navigate import queryPoseNav
@@ -345,7 +345,7 @@ class HSRBDetectingReal(ProcessModule):
     def _execute(self, desig: DetectingMotion.Motion) -> Any:
         # todo at the moment perception ignores searching for a specific object type so we do as well on real
-        if desig.technique == 'human':
+        if desig.technique == 'human' and (desig.state == "start" or desig.state is None):
             human_pose = queryHuman()
             pose = Pose.from_pose_stamped(human_pose)
             pose.position.z = 0
@@ -359,6 +359,10 @@ def _execute(self, desig: DetectingMotion.Motion) -> Any:
                 return object_dict
             return human_pose
+        elif desig.technique == 'human' and desig.state == "stop":
+            stop_queryHuman()
+            return None
+
         query_result = queryEmpty(ObjectDesignatorDescription(types=[desig.object_type]))
         perceived_objects = []