diff --git a/binder/01-import.py b/binder/01-import.py index 6cb050273..ea51fc73e 100755 --- a/binder/01-import.py +++ b/binder/01-import.py @@ -3,7 +3,7 @@ sys.path.insert(0, '/home/jovyan/workspace/ros/src/pycram') from demos.pycram_virtual_building_demos.setup.setup_launch_robot import launch_pr2, launch_hsrb, launch_stretch, \ - launch_tiago, launch_justin, launch_donbot + launch_tiago, launch_justin, launch_donbot, launch_armar, launch_icub from pycram.datastructures.enums import ObjectType, WorldMode from pycram.designators.object_designator import * from pycram.object_descriptors.urdf import ObjectDescription @@ -27,3 +27,7 @@ launch_justin() elif robot_param == 'donbot': launch_donbot() +elif robot_param == 'armar': + launch_armar() +elif robot_param == 'icub': + launch_icub() diff --git a/binder/pycram-http.rosinstall b/binder/pycram-http.rosinstall index 4ad9ec467..0830eeb96 100644 --- a/binder/pycram-http.rosinstall +++ b/binder/pycram-http.rosinstall @@ -5,8 +5,8 @@ repositories: version: master iai_robots: type: git - url: http://github.com/code-iai/iai_robots.git - version: master + url: http://github.com/LucaKro/iai_robots.git + version: armar6 pr2_common: type: git url: https://github.com/PR2/pr2_common.git diff --git a/demos/pycram_bullet_world_demo/demo.py b/demos/pycram_bullet_world_demo/demo.py index 1ee2d14a1..606eb9fdd 100644 --- a/demos/pycram_bullet_world_demo/demo.py +++ b/demos/pycram_bullet_world_demo/demo.py @@ -1,10 +1,13 @@ -from pycram.ros.viz_marker_publisher import VizMarkerPublisher, AxisMarkerPublisher +from pycram.external_interfaces.ik import request_ik +from pycram.plan_failures import IKError +from pycram.ros.viz_marker_publisher import VizMarkerPublisher, AxisMarkerPublisher, CostmapPublisher +from pycram.utils import _apply_ik from pycram.worlds.bullet_world import BulletWorld from pycram.designators.action_designator import * from pycram.designators.location_designator import * from pycram.designators.object_designator 
import * from pycram.datastructures.enums import ObjectType, WorldMode, TorsoState -from pycram.datastructures.pose import Pose +from pycram.datastructures.pose import Pose, Transform from pycram.process_module import simulated_robot, with_simulated_robot from pycram.object_descriptors.urdf import ObjectDescription from pycram.world_concepts.world_object import Object @@ -19,17 +22,46 @@ apartment = Object("apartment", ObjectType.ENVIRONMENT, f"apartment-small{extension}") -milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02]), - color=Color(1, 0, 0, 1)) -cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", - pose=Pose([2.5, 2.5, 1.05]), color=Color(0, 1, 0, 1)) -bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([2.4, 2.2, 0.98]), - color=Color(1, 1, 0, 1)) -spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([2.4, 2.2, 0.85]), - color=Color(0, 0, 1, 1)) -apartment.attach(spoon, 'cabinet10_drawer_top') +if robot.name == "iCub": + milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([4.7, 4.65, 0.8]), + color=Color(1, 0, 0, 1)) + milk_target_pose = Pose([4.8, 3.45, 0.8]) -pick_pose = Pose([2.7, 2.15, 1]) + cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", + pose=Pose([4.65, 4.75, 0.8]), color=Color(0, 1, 0, 1)) + cereal_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) + + bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([4.7, 4.5, 0.75], [0, 0, -1, 1]), + color=Color(1, 1, 0, 1)) + bowl_target_pose = Pose([5, 3.3, 0.75], [0, 0, 0, 1]) + + spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([4.7, 4.3, 0.75], [0, 0, -1, 1]), + color=Color(0, 0, 1, 1)) + spoon_target_pose = Pose([5.2, 3.3, 0.8], [0, 0, 1, 1]) + + pick_pose = Pose([4.7, 4.5, 0.8]) + nav_pose = Pose([4, 4.5, 0]) +else: + milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02], [0, 0, 1, 1]), + color=Color(1, 0, 0, 1)) + milk_target_pose = Pose([4.8, 
3.55, 0.8]) + + cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", + pose=Pose([2.5, 2.5, 1.05]), color=Color(0, 1, 0, 1)) + cereal_target_pose = Pose([5.2, 3.4, 0.8], [0, 0, 1, 1]) + + bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([2.4, 2.2, 0.98]), + color=Color(1, 1, 0, 1)) + bowl_target_pose = Pose([5, 3.3, 0.8], [0, 0, 1, 1]) + + spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([2.5, 2.2, 0.85]), + color=Color(0, 0, 1, 1)) + spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) + + apartment.attach(spoon, 'cabinet10_drawer_top') + + pick_pose = Pose([2.7, 2.15, 1]) + nav_pose = Pose([1.5, 2, 0]) robot_desig = BelieveObject(names=[robot_name]) apartment_desig = BelieveObject(names=["apartment"]) @@ -37,7 +69,7 @@ @with_simulated_robot def move_and_detect(obj_type): - NavigateAction(target_locations=[Pose([1.7, 2, 0])]).resolve().perform() + NavigateAction(target_locations=[nav_pose]).resolve().perform() LookAtAction(targets=[pick_pose]).resolve().perform() @@ -47,89 +79,109 @@ def move_and_detect(obj_type): with simulated_robot: + NavigateAction([Pose([0, 0, 0])]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() MoveTorsoAction([TorsoState.HIGH]).resolve().perform() + # handle_desig = ObjectPart(names=["handle_cab3_door_top"], part_of=apartment_desig.resolve()) + # closed_location, opened_location = AccessingLocation(handle_desig=handle_desig.resolve(), + # robot_desig=robot_desig.resolve()).resolve() + # OpenAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], start_goal_location=[closed_location, opened_location]).resolve().perform() + # NavigateAction([Pose([0, 0, 0])]).resolve().perform() + # CloseAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], start_goal_location=[opened_location, closed_location]).resolve().perform() + # ParkArmsAction([Arms.BOTH]).resolve().perform() + milk_desig = 
move_and_detect(ObjectType.MILK) - TransportAction(milk_desig, [Arms.LEFT], [Pose([4.8, 3.55, 0.8])]).resolve().perform() + TransportAction(milk_desig, [Arms.LEFT], [milk_target_pose]).resolve().perform() cereal_desig = move_and_detect(ObjectType.BREAKFAST_CEREAL) - TransportAction(cereal_desig, [Arms.LEFT], [Pose([5.2, 3.4, 0.8], [0, 0, 1, 1])]).resolve().perform() - - bowl_desig = move_and_detect(ObjectType.BOWL) + TransportAction(cereal_desig, [Arms.LEFT], [cereal_target_pose]).resolve().perform() - TransportAction(bowl_desig, [Arms.LEFT], [Pose([5, 3.3, 0.8], [0, 0, 1, 1])]).resolve().perform() + if not robot.name == "tiago_dual": + bowl_desig = move_and_detect(ObjectType.BOWL) - # Finding and navigating to the drawer holding the spoon - handle_desig = ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) + TransportAction(bowl_desig, [Arms.LEFT], [bowl_target_pose]).resolve().perform() - if robot.name == "rollin_justin": - pose = Pose([1.4, 1.6, 0], [0, 0, 0.2040033016133158, 0.9789702002261697]) - drawer_open_loc = AccessingLocation.Location(pose, [Arms.LEFT]) + if robot.name == "iCub": + spoon_desig = move_and_detect(ObjectType.SPOON) + TransportAction(spoon_desig, [Arms.LEFT], [spoon_target_pose]).resolve().perform() else: - drawer_open_loc = AccessingLocation(handle_desig=handle_desig.resolve(), - robot_desig=robot_desig.resolve()).resolve() + # Finding and navigating to the drawer holding the spoon + handle_desig = ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) + closed_location, opened_location = AccessingLocation(handle_desig=handle_desig.resolve(), + robot_desig=robot_desig.resolve()).resolve() - NavigateAction([drawer_open_loc.pose]).resolve().perform() + NavigateAction([closed_location.pose]).resolve().perform() - OpenAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() - spoon.detach(apartment) + OpenAction(object_designator_description=handle_desig, 
arms=[closed_location.arms[0]], + start_goal_location=[closed_location, opened_location]).resolve().perform() + spoon.detach(apartment) - # Detect and pickup the spoon - LookAtAction([apartment.get_link_pose("handle_cab10_t")]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + # Detect and pickup the spoon + ParkArmsAction([Arms.BOTH]).resolve().perform() + MoveTorsoAction([TorsoState.HIGH]).resolve().perform() + LookAtAction([apartment.get_link_pose("handle_cab10_t")]).resolve().perform() - spoon_desig = DetectAction(BelieveObject(types=[ObjectType.SPOON])).resolve().perform() + spoon_desig = DetectAction(BelieveObject(types=[ObjectType.SPOON])).resolve().perform() - if robot.name == "iai_donbot": - ParkArmsAction([Arms.BOTH]).resolve().perform() - PickUpAction(spoon_desig, [Arms.LEFT], [Grasp.TOP]).resolve().perform() + if robot.name == "iai_donbot": + ParkArmsAction([Arms.BOTH]).resolve().perform() + PickUpAction(spoon_desig, [Arms.LEFT], [Grasp.TOP]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - # Find a pose to place the spoon, move and then place it - spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) - placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve()).resolve() + # Find a pose to place the spoon, move and then place it + placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), + reachable_arm=Arms.LEFT, + used_grasps=[Grasp.TOP], object_in_hand=spoon_desig).resolve() - NavigateAction([placing_loc.pose]).resolve().perform() + NavigateAction([placing_loc.pose]).resolve().perform() - PlaceAction(spoon_desig, [spoon_target_pose], [Arms.LEFT]).resolve().perform() + PlaceAction(spoon_desig, [spoon_target_pose], [Arms.LEFT]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - 
NavigateAction([drawer_open_loc.pose]).resolve().perform() + NavigateAction([closed_location.pose]).resolve().perform() - CloseAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() + CloseAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], + start_goal_location=[opened_location, closed_location]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - else: - if robot.name == "tiago_dual": - NavigateAction([Pose([1.45, 2.7, 0], [0, 0, 0, 1])]).resolve().perform() + else: - pickup_arm = Arms.LEFT if drawer_open_loc.arms[0] == Arms.RIGHT else Arms.RIGHT - ParkArmsAction([Arms.BOTH]).resolve().perform() - PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() + pickup_arm = Arms.LEFT if closed_location.arms[0] == Arms.RIGHT else Arms.RIGHT + try: + PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() + except IKError: + pickup_loc = CostmapLocation(target=spoon_desig, reachable_for=robot_desig.resolve(), + reachable_arm=pickup_arm, used_grasps=[Grasp.TOP]).resolve() + ParkArmsAction([Arms.BOTH]).resolve().perform() + NavigateActionPerformable(pickup_loc.pose).perform() + PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() - ParkArmsAction([Arms.LEFT if pickup_arm == Arms.LEFT else Arms.RIGHT]).resolve().perform() + ParkArmsAction([Arms.LEFT if pickup_arm == Arms.LEFT else Arms.RIGHT]).resolve().perform() - NavigateAction([drawer_open_loc.pose]).resolve().perform() + NavigateAction([opened_location.pose]).resolve().perform() - CloseAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() + CloseAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], + start_goal_location=[opened_location, closed_location]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + 
ParkArmsAction([Arms.BOTH]).resolve().perform() - MoveTorsoAction([TorsoState.MID]).resolve().perform() + MoveTorsoAction([TorsoState.MID]).resolve().perform() - # Find a pose to place the spoon, move and then place it - spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) - placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), reachable_arm=pickup_arm, used_grasps=[Grasp.TOP]).resolve() + # Find a pose to place the spoon, move and then place it + placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), + reachable_arm=pickup_arm, used_grasps=[Grasp.TOP], + object_in_hand=spoon_desig).resolve() - NavigateAction([placing_loc.pose]).resolve().perform() + NavigateAction([placing_loc.pose]).resolve().perform() - PlaceAction(spoon_desig, [spoon_target_pose], [pickup_arm]).resolve().perform() + PlaceAction(spoon_desig, [spoon_target_pose], [pickup_arm]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() diff --git a/demos/pycram_virtual_building_demos/setup/setup_launch_robot.py b/demos/pycram_virtual_building_demos/setup/setup_launch_robot.py index 0b4201af9..b7a000e9b 100644 --- a/demos/pycram_virtual_building_demos/setup/setup_launch_robot.py +++ b/demos/pycram_virtual_building_demos/setup/setup_launch_robot.py @@ -54,18 +54,35 @@ def launch_donbot(): args = ["robot:=donbot"] launch_robot(executable, args=args) +def launch_armar(): + # name = 'Armar6' + # urdf = 'Armar6.urdf' + executable = 'ik_and_description.launch' + args = ["robot:=armar"] + launch_robot(executable, args=args) + +def launch_icub(): + # name = 'iCub' + # urdf = 'iCub.urdf' + executable = 'ik_and_description.launch' + args = ["robot:=icub"] + launch_robot(executable, args=args) def launch_robot(launch_file, package='pycram', launch_folder='/launch/', args: List[str] = None): """ - General method to start a specified launch file with given parameters. 
- Default location for launch files here is in the folder 'launch' inside the pycram package + Starts a specified launch file with given parameters. + + The default location for launch files is the 'launch' folder inside the pycram package. - :param launch_file: File name of the launch file - :param package: Name of the package - :param launch_folder: Location of the launch file inside the package - :param args: List of arguments to pass onto the launch file + Args: + launch_file (str): File name of the launch file. + package (str): Name of the package. + launch_folder (str): Location of the launch file inside the package. + args (list): List of arguments to pass to the launch file. + + Returns: + None """ - # Suppress all output from the function rospath = rospkg.RosPack() uuid = roslaunch.rlutil.get_or_generate_uuid(None, False) @@ -82,6 +99,4 @@ def launch_robot(launch_file, package='pycram', launch_folder='/launch/', args: launch.start() rospy.loginfo(f'{launch_file} started') - - # Wait for ik server to launch time.sleep(2) diff --git a/demos/pycram_virtual_building_demos/setup/setup_utils.py b/demos/pycram_virtual_building_demos/setup/setup_utils.py index f96867ef2..65b528941 100644 --- a/demos/pycram_virtual_building_demos/setup/setup_utils.py +++ b/demos/pycram_virtual_building_demos/setup/setup_utils.py @@ -30,12 +30,26 @@ def update_text(text_widget, new_text): text_widget.value = f'{new_text}' -def get_robot_name(robot_name): - if robot_name == 'justin': - return 'rollin_justin' - elif robot_name == 'donbot': - return 'iai_donbot' - elif robot_name == 'tiago': - return 'tiago_dual' - - return robot_name +def get_robot_name(robot_name: str): + """ + Retrieves the internal identifier for a specified robot name. + + This function maps a given `robot_name` to a predefined internal identifier. + If `robot_name` does not match any known names, it returns the input unchanged. + + Args: + robot_name (str): The name of the robot to map to its internal identifier. 
+ + Returns: + str: The internal identifier for the specified robot name, or the input + `robot_name` if no predefined match is found. + """ + robot_name_map = { + 'justin': 'rollin_justin', + 'donbot': 'iai_donbot', + 'tiago': 'tiago_dual', + 'armar': 'Armar6', + 'icub': 'iCub' + } + + return robot_name_map.get(robot_name, robot_name) \ No newline at end of file diff --git a/demos/pycram_virtual_building_demos/setup_demo_manager.py b/demos/pycram_virtual_building_demos/setup_demo_manager.py index 779ee573d..5dab41cef 100644 --- a/demos/pycram_virtual_building_demos/setup_demo_manager.py +++ b/demos/pycram_virtual_building_demos/setup_demo_manager.py @@ -5,7 +5,6 @@ from demos.pycram_virtual_building_demos.src.cleanup_demo import cleanup_demo from demos.pycram_virtual_building_demos.src.generlized_actions_demo import start_generalized_demo from demos.pycram_virtual_building_demos.src.transport_demo import transporting_demo -from pycram.utils import suppress_stdout_stderr # sys.path.insert(0, '/home/vee/robocup_workspaces/pycram_ws/src/pycram') sys.path.insert(0, '/home/jovyan/workspace/ros/src/pycram') @@ -40,6 +39,10 @@ def start_demo(): update_text(text_widget, 'Loading process~ Please wait...') world = BulletWorld(WorldMode.DIRECT) + + # Set this to True to publish costmaps and axis marker during the demo. May slow down the simulation. 
+ world.allow_publish_debug_poses = False + VizMarkerPublisher() robot = Object(robot_name, ObjectType.ROBOT, f"{robot_name}{extension}", pose=Pose([1, 2, 0])) apartment = Object(environment_param, ObjectType.ENVIRONMENT, f"{environment_param}{extension}") @@ -80,11 +83,12 @@ def demo_selecting(apartment, robot, task_param): if task_param == "navigating": navigate_simple_example() elif task_param == "transporting": - specialized_task = rospy.get_param('/nbparam_specialized_task') + specialized_task = None + # specialized_task = rospy.get_param('/nbparam_specialized_task') if specialized_task == "clean": - transporting_demo(apartment, robot) - else: cleanup_demo(apartment, robot) + else: + transporting_demo(apartment, robot) elif task_param in ["cutting", "mixing", "pouring"]: # object_target = rospy.get_param('/nbparam_object') # object_tool = rospy.get_param('/nbparam_object_tool') @@ -97,7 +101,6 @@ def demo_selecting(apartment, robot, task_param): else: object_target = "banana" object_tool = "butter_knife" - specialized_task = rospy.get_param('/nbparam_specialized_task') start_generalized_demo(task_param, object_tool, object_target, specialized_task) diff --git a/demos/pycram_virtual_building_demos/src/transport_demo.py b/demos/pycram_virtual_building_demos/src/transport_demo.py index 4964277b9..84f163415 100644 --- a/demos/pycram_virtual_building_demos/src/transport_demo.py +++ b/demos/pycram_virtual_building_demos/src/transport_demo.py @@ -1,5 +1,6 @@ from IPython.core.display_functions import clear_output +from pycram.plan_failures import IKError from pycram.ros.viz_marker_publisher import VizMarkerPublisher, AxisMarkerPublisher from pycram.worlds.bullet_world import BulletWorld from pycram.designators.action_designator import * @@ -12,6 +13,7 @@ from pycram.world_concepts.world_object import Object from pycram.datastructures.dataclasses import Color + # extension = ObjectDescription.get_file_extension() # world = BulletWorld(WorldMode.DIRECT) @@ -24,25 
+26,53 @@ # apartment = Object("apartment", ObjectType.ENVIRONMENT, f"apartment-small{extension}") def transporting_demo(apartment, robot): - milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02]), - color=Color(1, 0, 0, 1)) - cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", - pose=Pose([2.5, 2.5, 1.05]), color=Color(0, 1, 0, 1)) - bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([2.4, 2.2, 0.98]), - color=Color(1, 1, 0, 1)) - spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([2.4, 2.2, 0.85]), - color=Color(0, 0, 1, 1)) - apartment.attach(spoon, 'cabinet10_drawer_top') - - pick_pose = Pose([2.7, 2.15, 1]) + if robot.name == "iCub": + milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([4.7, 4.65, 0.8]), + color=Color(1, 0, 0, 1)) + milk_target_pose = Pose([4.8, 3.45, 0.8]) + + cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", + pose=Pose([4.65, 4.75, 0.8]), color=Color(0, 1, 0, 1)) + cereal_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) + + bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([4.7, 4.5, 0.75], [0, 0, -1, 1]), + color=Color(1, 1, 0, 1)) + bowl_target_pose = Pose([5, 3.3, 0.75], [0, 0, 0, 1]) + + spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([4.7, 4.3, 0.75], [0, 0, -1, 1]), + color=Color(0, 0, 1, 1)) + spoon_target_pose = Pose([5.2, 3.3, 0.8], [0, 0, 1, 1]) + + pick_pose = Pose([4.7, 4.5, 0.8]) + nav_pose = Pose([4, 4.5, 0]) + else: + milk = Object("milk", ObjectType.MILK, "milk.stl", pose=Pose([2.5, 2, 1.02], [0, 0, 1, 1]), + color=Color(1, 0, 0, 1)) + milk_target_pose = Pose([4.8, 3.55, 0.8]) + + cereal = Object("cereal", ObjectType.BREAKFAST_CEREAL, "breakfast_cereal.stl", + pose=Pose([2.5, 2.5, 1.05]), color=Color(0, 1, 0, 1)) + cereal_target_pose = Pose([5.2, 3.4, 0.8], [0, 0, 1, 1]) + + bowl = Object("bowl", ObjectType.BOWL, "bowl.stl", pose=Pose([2.4, 2.2, 0.98]), + color=Color(1, 1, 0, 1)) + 
bowl_target_pose = Pose([5, 3.3, 0.8], [0, 0, 1, 1]) + + spoon = Object("spoon", ObjectType.SPOON, "spoon.stl", pose=Pose([2.5, 2.2, 0.85]), + color=Color(0, 0, 1, 1)) + spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) + + apartment.attach(spoon, 'cabinet10_drawer_top') + + pick_pose = Pose([2.7, 2.15, 1]) + nav_pose = Pose([1.5, 2, 0]) robot_desig = BelieveObject(names=[robot.name]) apartment_desig = BelieveObject(names=[apartment.name]) - @with_simulated_robot def move_and_detect(obj_type): - NavigateAction(target_locations=[Pose([1.7, 2, 0])]).resolve().perform() + NavigateAction(target_locations=[nav_pose]).resolve().perform() LookAtAction(targets=[pick_pose]).resolve().perform() @@ -57,90 +87,102 @@ def move_and_detect(obj_type): milk_desig = move_and_detect(ObjectType.MILK) - TransportAction(milk_desig, [Arms.LEFT], [Pose([4.8, 3.55, 0.8])]).resolve().perform() + TransportAction(milk_desig, [Arms.LEFT], [milk_target_pose]).resolve().perform() clear_output() cereal_desig = move_and_detect(ObjectType.BREAKFAST_CEREAL) if robot.name == "tiago_dual": - TransportAction(cereal_desig, [Arms.LEFT], [Pose([5.25, 3.4, 0.85], [0, 0, 0, -1])]).resolve().perform() + #cereal_target_pose = Pose([5.25, 3.4, 0.85], [0, 0, 0, -1]) + TransportAction(cereal_desig, [Arms.LEFT], [cereal_target_pose]).resolve().perform() else: - TransportAction(cereal_desig, [Arms.LEFT], [Pose([5.2, 3.4, 0.8], [0, 0, 1, 1])]).resolve().perform() + TransportAction(cereal_desig, [Arms.LEFT], [cereal_target_pose]).resolve().perform() clear_output() - bowl_desig = move_and_detect(ObjectType.BOWL) + if not robot.name == "tiago_dual": + bowl_desig = move_and_detect(ObjectType.BOWL) - TransportAction(bowl_desig, [Arms.LEFT], [Pose([5, 3.3, 0.8], [0, 0, 1, 1])]).resolve().perform() - clear_output() + TransportAction(bowl_desig, [Arms.LEFT], [bowl_target_pose]).resolve().perform() + clear_output() - # Finding and navigating to the drawer holding the spoon - handle_desig = 
ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) + if robot.name == "iCub": + spoon_desig = move_and_detect(ObjectType.SPOON) - if robot.name == "rollin_justin": - pose = Pose([1.4, 1.6, 0], [0, 0, 0.2040033016133158, 0.9789702002261697]) - drawer_open_loc = AccessingLocation.Location(pose, [Arms.LEFT]) + TransportAction(spoon_desig, [Arms.LEFT], [spoon_target_pose]).resolve().perform() + clear_output() else: - drawer_open_loc = AccessingLocation(handle_desig=handle_desig.resolve(), - robot_desig=robot_desig.resolve()).resolve() - - NavigateAction([drawer_open_loc.pose]).resolve().perform() - OpenAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() - spoon.detach(apartment) + # Finding and navigating to the drawer holding the spoon + handle_desig = ObjectPart(names=["handle_cab10_t"], part_of=apartment_desig.resolve()) + closed_location, opened_location = AccessingLocation(handle_desig=handle_desig.resolve(), + robot_desig=robot_desig.resolve()).resolve() - # Detect and pickup the spoon - LookAtAction([apartment.get_link_pose("handle_cab10_t")]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + NavigateAction([closed_location.pose]).resolve().perform() - spoon_desig = DetectAction(BelieveObject(types=[ObjectType.SPOON])).resolve().perform() + OpenAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], + start_goal_location=[closed_location, opened_location]).resolve().perform() + spoon.detach(apartment) - if robot.name == "iai_donbot": + # Detect and pickup the spoon ParkArmsAction([Arms.BOTH]).resolve().perform() - PickUpAction(spoon_desig, [Arms.LEFT], [Grasp.TOP]).resolve().perform() + MoveTorsoAction([TorsoState.HIGH]).resolve().perform() + LookAtAction([apartment.get_link_pose("handle_cab10_t")]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + spoon_desig = 
DetectAction(BelieveObject(types=[ObjectType.SPOON])).resolve().perform() - # Find a pose to place the spoon, move and then place it - spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) - placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve()).resolve() + if robot.name == "iai_donbot": + ParkArmsAction([Arms.BOTH]).resolve().perform() + PickUpAction(spoon_desig, [Arms.LEFT], [Grasp.TOP]).resolve().perform() - NavigateAction([placing_loc.pose]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - PlaceAction(spoon_desig, [spoon_target_pose], [Arms.LEFT]).resolve().perform() + # Find a pose to place the spoon, move and then place it + placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), + reachable_arm=Arms.LEFT, + used_grasps=[Grasp.TOP], object_in_hand=spoon_desig).resolve() - ParkArmsAction([Arms.BOTH]).resolve().perform() + NavigateAction([placing_loc.pose]).resolve().perform() - NavigateAction([drawer_open_loc.pose]).resolve().perform() + PlaceAction(spoon_desig, [spoon_target_pose], [Arms.LEFT]).resolve().perform() - CloseAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + NavigateAction([closed_location.pose]).resolve().perform() - else: - if robot.name == "tiago_dual": - NavigateAction([Pose([1.45, 2.7, 0], [0, 0, 0, 1])]).resolve().perform() + CloseAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], + start_goal_location=[opened_location, closed_location]).resolve().perform() - pickup_arm = Arms.LEFT if drawer_open_loc.arms[0] == Arms.RIGHT else Arms.RIGHT - ParkArmsAction([Arms.BOTH]).resolve().perform() - PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() + else: + pickup_arm = Arms.LEFT if 
closed_location.arms[0] == Arms.RIGHT else Arms.RIGHT + try: + PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() + except IKError: + pickup_loc = CostmapLocation(target=spoon_desig, reachable_for=robot_desig.resolve(), + reachable_arm=pickup_arm, used_grasps=[Grasp.TOP]).resolve() + ParkArmsAction([Arms.BOTH]).resolve().perform() + NavigateActionPerformable(pickup_loc.pose).perform() + PickUpAction(spoon_desig, [pickup_arm], [Grasp.TOP]).resolve().perform() - ParkArmsAction([Arms.LEFT if pickup_arm == Arms.LEFT else Arms.RIGHT]).resolve().perform() + ParkArmsAction([Arms.LEFT if pickup_arm == Arms.LEFT else Arms.RIGHT]).resolve().perform() - NavigateAction([drawer_open_loc.pose]).resolve().perform() + NavigateAction([opened_location.pose]).resolve().perform() - CloseAction(object_designator_description=handle_desig, arms=[drawer_open_loc.arms[0]]).resolve().perform() + CloseAction(object_designator_description=handle_desig, arms=[closed_location.arms[0]], + start_goal_location=[opened_location, closed_location]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() + ParkArmsAction([Arms.BOTH]).resolve().perform() - MoveTorsoAction([TorsoState.MID]).resolve().perform() + MoveTorsoAction([TorsoState.MID]).resolve().perform() - # Find a pose to place the spoon, move and then place it - spoon_target_pose = Pose([4.85, 3.3, 0.8], [0, 0, 1, 1]) - placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), - reachable_arm=pickup_arm, used_grasps=[Grasp.TOP]).resolve() + # Find a pose to place the spoon, move and then place it + placing_loc = CostmapLocation(target=spoon_target_pose, reachable_for=robot_desig.resolve(), + reachable_arm=pickup_arm, used_grasps=[Grasp.TOP], + object_in_hand=spoon_desig).resolve() - NavigateAction([placing_loc.pose]).resolve().perform() + NavigateAction([placing_loc.pose]).resolve().perform() - PlaceAction(spoon_desig, [spoon_target_pose], 
[pickup_arm]).resolve().perform() + PlaceAction(spoon_desig, [spoon_target_pose], [pickup_arm]).resolve().perform() + + ParkArmsAction([Arms.BOTH]).resolve().perform() - ParkArmsAction([Arms.BOTH]).resolve().perform() diff --git a/launch/ik_and_description.launch b/launch/ik_and_description.launch index 1e87a5c21..57126410d 100644 --- a/launch/ik_and_description.launch +++ b/launch/ik_and_description.launch @@ -62,10 +62,10 @@ textfile="$(find pycram)/resources/robots/iCub.urdf"/> - - + + + textfile="$(find pycram)/resources/robots/Armar6.urdf"/> diff --git a/resources/apartment-small.urdf b/resources/apartment-small.urdf index 1baaf0a3b..416c139ee 100644 --- a/resources/apartment-small.urdf +++ b/resources/apartment-small.urdf @@ -1892,6 +1892,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/resources/robots/Armar6.urdf b/resources/robots/Armar6.urdf new file mode 100644 index 000000000..791bcde7d --- /dev/null +++ b/resources/robots/Armar6.urdf @@ -0,0 +1,1797 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/resources/robots/iCub.urdf b/resources/robots/iCub.urdf index dfa316af6..bfe079b8e 100644 --- a/resources/robots/iCub.urdf +++ b/resources/robots/iCub.urdf @@ -2046,13 +2046,13 @@ - + - + diff --git a/src/pycram/costmaps.py b/src/pycram/costmaps.py index 43938ffcd..e752ad4b6 100644 --- a/src/pycram/costmaps.py +++ b/src/pycram/costmaps.py @@ -1,18 +1,22 @@ # used for delayed evaluation of typing until python 3.11 becomes mainstream from __future__ import annotations +from tqdm import tqdm from typing_extensions import Tuple, List, Optional import matplotlib.pyplot as plt from dataclasses import dataclass +from scipy.spatial.transform import Rotation as R import numpy as np import psutil import rospy from matplotlib import colors from nav_msgs.msg import OccupancyGrid, MapMetaData +from .datastructures.enums import Grasp from .datastructures.world import UseProspectionWorld +from .ros.viz_marker_publisher import CostmapPublisher from .world_concepts.world_object import Object from .description import Link from .local_transformer import LocalTransformer @@ -50,27 +54,26 @@ def scale(self, x_factor: float, y_factor: float): class Costmap: """ - The base class of all Costmaps which implements the visualization of costmaps - in the World. - """ + Base class for all costmaps, providing visualization of costmaps in the World. 
- def __init__(self, resolution: float, - height: int, - width: int, - origin: Pose, - map: np.ndarray, - world: Optional[World] = None): + This class handles essential properties such as resolution, dimensions, and origin + of the costmap, along with the costmap data itself. + """ + def __init__(self, resolution: float, height: int, width: int, origin: Pose, + map: np.ndarray, world: Optional[World] = None): """ - The constructor of the base class of all Costmaps. - - :param resolution: The distance in metre in the real-world which is - represented by a single entry in the costmap. - :param height: The height of the costmap. - :param width: The width of the costmap. - :param origin: The origin of the costmap, in world coordinate frame. The origin of the costmap is located in the - centre of the costmap. - :param map: The costmap represents as a 2D numpy array. - :param world: The World for which the costmap should be created. + Initializes the Costmap with specified resolution, dimensions, origin, and map data. + + Args: + resolution (float): The real-world distance in meters represented by a single + entry in the costmap. + height (int): The height of the costmap in grid cells. + width (int): The width of the costmap in grid cells. + origin (Pose): The origin of the costmap in world coordinates, centered + in the middle of the costmap. + map (np.ndarray): A 2D numpy array representing the costmap data. + world (Optional[World]): The World instance in which this costmap will be used. + Defaults to the current world. 
""" self.world = world if world else World.current_world self.resolution: float = resolution @@ -78,7 +81,8 @@ def __init__(self, resolution: float, self.height: int = height self.width: int = width local_transformer = LocalTransformer() - self.origin: Pose = local_transformer.transform_pose(origin, 'map') + self.origin = Pose(origin.position, [0, 0, 0, 1]) + self.origin: Pose = local_transformer.transform_pose(self.origin, 'map') self.map: np.ndarray = map self.vis_ids: List[int] = [] @@ -149,6 +153,45 @@ def visualize(self) -> None: linkCollisionShapeIndices=link_collision) self.vis_ids.append(map_obj) + def publish(self, weighted: bool = False, scale: float = 0.4): + """ + Publishes the costmap to the World, rendering it for visualization. + + This method iterates over all positions in the costmap where values are greater than zero, + transforming them into poses relative to the world. Optionally, the map values can be + visualized with scaled weights. + + Args: + weighted (bool): If True, scales the z-coordinate of each pose based on the costmap + value to visualize "height" as intensity. Defaults to False. + scale (float): A scaling factor for the intensity values when `weighted` is True. + Defaults to 0.4. + """ + indices = np.argwhere(self.map > 0) + height, width = self.map.shape + center = np.array([height // 2, width // 2]) + + origin_to_map = self.origin.to_transform("origin").invert() + + poses = [ + Pose( + (Transform([*((center - ind) * self.resolution), 0], + frame="point", child_frame="origin") * origin_to_map) + .invert().translation_as_list() + ) + for ind in tqdm(indices) + ] + + if weighted: + weights = np.round(self.map, 2) + poses = [ + Pose([pose.position.x, pose.position.y, weights[ind[0], ind[1]] * scale]) + for pose, ind in zip(poses, indices) + ] + + costmap = CostmapPublisher() + costmap.publish(poses=poses, size=self.resolution, scale=scale) + def _chunks(self, lst: List, n: int) -> List: """ Yield successive n-sized chunks from lst. 
@@ -207,47 +250,111 @@ def _find_max_box_height(self, start: Tuple[int, int], length: int, map: np.ndar curr_height += 1 return curr_height - def merge(self, other_cm: Costmap) -> Costmap: + def merge_or_prioritize(self, other_cm: Costmap, prioritize_overlap: bool = False) -> Costmap: """ - Merges the values of two costmaps and returns a new costmap that has for - every cell the merged values of both inputs. To merge two costmaps they - need to fulfill 3 constrains: + Merges two costmaps, creating a new costmap with updated cell values. + + If `prioritize_overlap` is set to True, overlapping regions are prioritized + by adding 1 to the overlapping cells in the base costmap, instead of completely merging them. Otherwise, the + merged costmap will combine values by multiplying overlapping cells. + + Merging conditions: + + 1. The origin (x, y coordinates and orientation) of both costmaps must match. + 2. The resolution of both costmaps must be identical. - 1. They need to have the same size - 2. They need to have the same x and y coordinates in the origin - 3. They need to have the same resolution + If these conditions are not met, a `ValueError` is raised. - If any of these constrains is not fulfilled a ValueError will be raised. + Args: + other_cm (Costmap): The other costmap to merge with this costmap. + prioritize_overlap (bool): If True, prioritize overlapping regions by adding + 1 to the overlapping cells. Defaults to False. - :param other_cm: The other costmap with which this costmap should be merged. - :return: A new costmap that contains the merged values + Returns: + Costmap: A new costmap containing the merged values of both inputs. + + Raises: + ValueError: If the origin or resolution of the costmaps do not match. 
""" - if self.size != other_cm.size: - raise ValueError("You can only merge costmaps of the same size.") - elif self.origin.position.x != other_cm.origin.position.x or self.origin.position.y != other_cm.origin.position.y \ + if self.origin.position.x != other_cm.origin.position.x or self.origin.position.y != other_cm.origin.position.y \ or self.origin.orientation != other_cm.origin.orientation: - raise ValueError("To merge costmaps, the x and y coordinate as well as the orientation must be equal.") + raise ValueError("To merge costmaps, the x and y coordinates as well as the orientation must be equal.") elif self.resolution != other_cm.resolution: raise ValueError("To merge two costmaps their resolution must be equal.") - new_map = np.zeros((self.height, self.width)) - # A nunpy array of the positions where both costmaps are greater than 0 - merge = np.logical_and(self.map > 0, other_cm.map > 0) - new_map[merge] = self.map[merge] * other_cm.map[merge] - new_map = (new_map / np.max(new_map)).reshape((self.height, self.width)) - return Costmap(self.resolution, self.height, self.width, self.origin, new_map) + + if self.size == other_cm.size: + smaller_map_padded = other_cm.map + larger_cm = self + else: + larger_cm, smaller_cm = (self, other_cm) if self.size > other_cm.size else (other_cm, self) + larger_size, smaller_size = larger_cm.size, smaller_cm.size + offset = int(larger_size - smaller_size) + odd = offset % 2 != 0 + smaller_map_padded = np.pad(smaller_cm.map, (offset // 2, offset // 2 + odd)) + + dimensions = larger_cm.map.shape[0] + new_map = np.zeros((dimensions, dimensions)) + overlap_region = np.logical_and(larger_cm.map > 0, smaller_map_padded > 0) + + if prioritize_overlap: + original_map = np.copy(larger_cm.map) if self.size >= other_cm.size else np.copy(smaller_map_padded) + original_map[overlap_region] += 1 + new_map = original_map + else: + new_map[overlap_region] = larger_cm.map[overlap_region] * smaller_map_padded[overlap_region] + + min_val = 
new_map.min() + max_val = new_map.max() + if max_val > min_val: + normalized_map = (new_map - min_val) / (max_val - min_val) + else: + normalized_map = np.ones_like(new_map) + + normalized_map = (normalized_map / np.max(normalized_map)).reshape((dimensions, dimensions)) + return Costmap(larger_cm.resolution, dimensions, dimensions, larger_cm.origin, normalized_map) def __add__(self, other: Costmap) -> Costmap: """ - Overloading of the "+" operator for merging of Costmaps. Furthermore, checks if 'other' is actual a Costmap and - raises a ValueError if this is not the case. Please check :func:`~Costmap.merge` for further information of merging. + Overloads the "+" operator to merge two Costmaps. + + If the `other` parameter is not a Costmap, raises a `ValueError`. The merging + process follows the same behavior as the `merge_or_prioritize` method with + default settings. + + Args: + other (Costmap): The other Costmap to merge with this Costmap. + + Returns: + Costmap: A new Costmap containing merged values from this and the other Costmap. + + Raises: + ValueError: If `other` is not a Costmap instance. + """ + if isinstance(other, Costmap): + return self.merge_or_prioritize(other) + else: + raise ValueError(f"Can only combine two costmaps, but received type {type(other)}") + + def __mul__(self, other: Costmap) -> Costmap: + """ + Overloads the "*" operator for prioritizing overlapping areas in two Costmaps. - :param other: Another Costmap - :return: A new Costmap that contains the merged values from this Costmap and the other Costmap + Uses the `merge_or_prioritize` method with `prioritize_overlap=True` to return + a new Costmap where weights in overlapping regions are increased. + + Args: + other (Costmap): The other Costmap to prioritize overlap with this Costmap. + + Returns: + Costmap: A new Costmap with prioritized overlapping weights. + + Raises: + ValueError: If `other` is not a Costmap instance. 
""" if isinstance(other, Costmap): - return self.merge(other) + return self.merge_or_prioritize(other, prioritize_overlap=True) else: - raise ValueError(f"Can only combine two costmaps other type was {type(other)}") + raise ValueError(f"Can only multiply with another Costmap, but received type {type(other)}") def partitioning_rectangles(self) -> List[Rectangle]: """ @@ -338,9 +445,9 @@ def __init__(self, distance_to_obstacle: float, self.size = size self.origin = Pose() if not origin else origin self.resolution = resolution - self.distance_obstacle = max(int(distance_to_obstacle / self.resolution), 1) + self.distance_obstacle = max(int(distance_to_obstacle / self.resolution), 2) self.map = self._create_from_world(size, resolution) - Costmap.__init__(self, resolution, size, size, self.origin, self.map) + Costmap.__init__(self, resolution, self.size, self.size, self.origin, self.map) def _calculate_diff_origin(self, height: int, width: int) -> Pose: """ @@ -443,10 +550,19 @@ def _create_from_world(self, size: int, resolution: float) -> np.ndarray: :param size: The size of this costmap. The size specifies the length of one side of the costmap. The costmap is created as a square. :param resolution: The resolution of this costmap. This determines how much meter a pixel in the costmap represents. 
""" + size_m = size / 100.0 + + num_pixels = int(size_m / resolution) + origin_position = self.origin.position_as_list() - # Generate 2d grid with indices - indices = np.concatenate(np.dstack(np.mgrid[int(-size / 2):int(size / 2), int(-size / 2):int(size / 2)]), + + half_num_pixels = num_pixels // 2 + upper_bound = half_num_pixels if num_pixels % 2 == 0 else half_num_pixels + 1 + + indices = np.concatenate(np.dstack(np.mgrid[-half_num_pixels:upper_bound, + -half_num_pixels:upper_bound]), axis=0) * resolution + np.array(origin_position[:2]) + # Add the z-coordinate to the grid, which is either 0 or 10 indices_0 = np.pad(indices, (0, 1), mode='constant', constant_values=5)[:-1] indices_10 = np.pad(indices, (0, 1), mode='constant', constant_values=0)[:-1] @@ -475,7 +591,7 @@ def _create_from_world(self, size: int, resolution: float) -> np.ndarray: res[i:j] = [1 if ray[0] == -1 else 0 for ray in r_t] i += len(n) - res = np.flip(np.reshape(np.array(res), (size, size))) + res = np.flip(np.reshape(np.array(res), (num_pixels, num_pixels))) map = np.pad(res, (int(self.distance_obstacle / 2), int(self.distance_obstacle / 2))) @@ -490,7 +606,8 @@ def _create_from_world(self, size: int, resolution: float) -> np.ndarray: map = (sum == (self.distance_obstacle * 2) ** 2).astype('int16') # The map loses some size due to the strides and because I dont want to # deal with indices outside of the index range - offset = self.size - map.shape[0] + self.size = num_pixels + offset = num_pixels - map.shape[0] odd = 0 if offset % 2 == 0 else 1 map = np.pad(map, (offset // 2, offset // 2 + odd)) @@ -712,39 +829,172 @@ def _generate_map(self): class GaussianCostmap(Costmap): """ - Gaussian Costmaps are 2D gaussian distributions around the origin with the given mean and sigma + A costmap representing a 2D Gaussian distribution centered at the origin. + + This class generates a square Gaussian distribution with specified mean and sigma. 
+ Optionally, it can apply a circular mask to create a circular distribution. """ def __init__(self, mean: int, sigma: float, resolution: Optional[float] = 0.02, - origin: Optional[Pose] = None): + origin: Optional[Pose] = None, circular: bool = False, distance: float = 0.0): """ - This Costmap creates a 2D gaussian distribution around the origin with - the specified size. + Initializes a 2D Gaussian costmap with specified distribution parameters and options. + + Args: + mean (int): The mean for the Gaussian distribution, which also determines the + side length of the costmap in centimeters. + sigma (float): The standard deviation (sigma) of the Gaussian distribution. + resolution (Optional[float]): The resolution of the costmap, representing the + real-world meters per pixel. Defaults to 0.02. + origin (Optional[Pose]): The origin of the costmap in world coordinates. If None, + defaults to the center. + circular (bool): If True, applies a circular mask to the costmap to create a + circular distribution. Defaults to False. + distance (float): The distance from the origin where the Gaussian peak should be placed. - :param mean: The mean input for the gaussian distribution, this also specifies - the length of the side of the resulting costmap. The costmap is Created - as a square. - :param sigma: The sigma input for the gaussian distribution. - :param resolution: The resolution of the costmap, this specifies how much - meter a pixel represents. - :param origin: The origin of the costmap around which it will be created. 
- """ - self.gau: np.ndarray = self._gaussian_window(mean, sigma) - self.map: np.ndarray = np.outer(self.gau, self.gau) - self.size: float = mean + """ + self.size = mean / 100.0 + self.resolution = resolution + num_pixels = int(self.size / self.resolution) + self.distance = distance + self.map: np.ndarray = self._gaussian_costmap(num_pixels, sigma) + if circular: + self.map = self._apply_circular_mask(self.map, num_pixels) self.origin: Pose = Pose() if not origin else origin - Costmap.__init__(self, resolution, mean, mean, self.origin, self.map) + Costmap.__init__(self, resolution, num_pixels, num_pixels, self.origin, self.map) + + def _gaussian_costmap(self, mean: int, std: float) -> np.ndarray: + """ + Generates a 2D Gaussian ring costmap centered at the origin, where the peak is + located at a specified distance from the center. + + This method creates a Gaussian distribution around a ring centered on the costmap, + allowing for a peak intensity at a specific radius from the center. + + Args: + mean (int): The side length of the square costmap in pixels. + std (float): The standard deviation (sigma) of the Gaussian distribution. + + Returns: + np.ndarray: A 2D numpy array representing the Gaussian ring distribution. + """ + radius_in_pixels = self.distance / self.resolution + + y, x = np.ogrid[:mean, :mean] + center = (mean - 1) / 2.0 + distance_from_center = np.sqrt((x - center) ** 2 + (y - center) ** 2) - def _gaussian_window(self, mean: int, std: float) -> np.ndarray: + ring_costmap = np.exp(-((distance_from_center - radius_in_pixels) ** 2) / (2 * std ** 2)) + return ring_costmap + + def _apply_circular_mask(self, grid: np.ndarray, num_pixels: int) -> np.ndarray: + """ + Applies a circular mask to a 2D grid, setting values outside the circle to zero. + + The radius of the circular mask is half the size of the grid, so only values within + this radius from the center will be retained. + + Args: + grid (np.ndarray): The 2D Gaussian grid to apply the mask to. 
+ num_pixels (int): The number of pixels along one axis of the square grid. + + Returns: + np.ndarray: The masked grid with values outside the circular area set to zero. + """ + y, x = np.ogrid[:num_pixels, :num_pixels] + center = num_pixels / 2 + radius = center + distance_from_center = np.sqrt((x - center) ** 2 + (y - center) ** 2) + circular_mask = distance_from_center <= radius + masked_grid = np.where(circular_mask, grid, 0) + return masked_grid + +class DirectionalCostmap(Costmap): + """ + A 2D Gaussian-based costmap focused in specific directions relative to the origin pose. + + The costmap is oriented such that it emphasizes the negative x-direction and both + positive and negative y-directions, while excluding the positive x-direction. + """ + + def __init__(self, size: int, face: Grasp, resolution: Optional[float] = 0.02, + origin: Optional[Pose] = None, has_object: bool = False): + """ + Initializes a directional costmap with Gaussian distribution focused in specific + directions, primarily the -x and y directions relative to the origin pose's orientation. + + Args: + size (int): The side length of the costmap in centimeters. + face (Grasp): The specific grasp direction for costmap alignment. + resolution (Optional[float]): The resolution of the costmap in meters per pixel. + Defaults to 0.02. + origin (Optional[Pose]): The origin pose of the costmap, determining its orientation. + Defaults to the world center. + has_object (bool): If True, considers that an object is present and adjusts the + costmap accordingly. Defaults to False. 
+ """ + self.size = size / 100.0 + self.resolution = resolution + self.face = face + self.has_object = has_object + num_pixels = int(self.size / self.resolution) + self.origin = origin.copy() if origin else Pose() + self.origin.position.z = 0 + self.map: np.ndarray = self._create_directional_map(num_pixels) + Costmap.__init__(self, resolution, num_pixels, num_pixels, self.origin, self.map) + + def _create_directional_map(self, num_pixels: int) -> np.ndarray: """ - This method creates a window of values with a gaussian distribution of - size "mean" and standart deviation "std". - Code from `Scipy `_ + Creates a directional costmap based on Gaussian distributions, masking out the + positive x-direction relative to the local frame of the origin pose. + + The orientation of the costmap is determined by the specified face direction + (e.g., front, back, left, right), with a mask applied to exclude areas in + the positive x-direction. + + Args: + num_pixels (int): The number of pixels along one axis of the square grid. + + Returns: + np.ndarray: A 2D numpy array representing the directional costmap. 
""" - n = np.arange(0, mean) - (mean - 1.0) / 2.0 - sig2 = 2 * std * std - w = np.exp(-n ** 2 / sig2) - return w + object_orientation = self.origin.orientation_as_list() + relative_rotations = { + Grasp.FRONT: [0, 0, 0, 1], + Grasp.BACK: [0, 0, 1, 0], + Grasp.LEFT: [0, 0, -0.707, 0.707], + Grasp.RIGHT: [0, 0, 0.707, 0.707], + Grasp.TOP: [0, 0.707, 0, 0.707], + Grasp.BOTTOM: [0, -0.707, 0, 0.707] + } + # currently doesnt really do anything for TOP and BOTTOM, but didnt make any problems either + # and i havent had the time to investigate further or think of better handling for these cases + # TODO: investigate and improve handling for TOP and BOTTOM + face_rotation = relative_rotations[self.face] + + object_rotation = R.from_quat(object_orientation) + face_rotation = R.from_quat(face_rotation) + combined_rotation = object_rotation * face_rotation + combined_orientation = combined_rotation.as_quat() + + map = np.ones((num_pixels, num_pixels)) + + + rotation = R.from_quat(combined_orientation) + rotation_matrix = rotation.as_matrix() if self.has_object else rotation.inv().as_matrix() + + center = np.ceil(num_pixels / 2) + x, y = np.meshgrid(np.arange(num_pixels), np.arange(num_pixels)) + x_offset = (x - center) * self.resolution + y_offset = (y - center) * self.resolution + + coords = np.stack((x_offset, y_offset, np.zeros_like(x_offset)), axis=-1) + transformed_coords = coords.dot(rotation_matrix.T) + + mask = transformed_coords[:, :, 1] >= 0 + + directional_map = np.where(mask, 0, map) + return directional_map class SemanticCostmap(Costmap): diff --git a/src/pycram/datastructures/dataclasses.py b/src/pycram/datastructures/dataclasses.py index beb37ab4e..1430c3ab7 100644 --- a/src/pycram/datastructures/dataclasses.py +++ b/src/pycram/datastructures/dataclasses.py @@ -2,6 +2,7 @@ from dataclasses import dataclass +from matplotlib import pyplot as plt from std_msgs.msg import ColorRGBA from typing_extensions import List, Optional, Tuple, Callable, Dict, Any, Union, 
TYPE_CHECKING from .enums import JointType, Shape @@ -114,6 +115,32 @@ def get_color_from_string(color): return converted_color + @staticmethod + def gaussian_color_map(value, min_val=0.0, max_val=0.4, colormap='viridis'): + """ + Maps a Gaussian-scaled value to a `ColorRGBA` object for color visualization. + + This function normalizes `value` within a specified range and maps it to an RGB color + using a specified colormap. The alpha (opacity) is set to full (1.0). + + Args: + value (float): The Gaussian value to map, expected within `min_val` and `max_val`. + min_val (float, optional): Minimum value of the mapping range. Defaults to 0.0. + max_val (float, optional): Maximum value of the mapping range. Defaults to 0.4. + colormap (str, optional): The name of the Matplotlib colormap to use. Defaults to 'viridis'. + + Returns: + ColorRGBA: A `ColorRGBA` object where the RGB channels correspond to the color + mapped from `value` using the specified colormap, and alpha is 1.0. + """ + clamped_value = max(min(value, max_val), min_val) + normalized_value = (clamped_value - min_val) / (max_val - min_val) + + color_map = plt.get_cmap(colormap) + rgba_color = color_map(normalized_value) + + return ColorRGBA(r=rgba_color[0], g=rgba_color[1], b=rgba_color[2], a=1.0) + @dataclass class AxisAlignedBoundingBox: diff --git a/src/pycram/datastructures/enums.py b/src/pycram/datastructures/enums.py index 8ffe67153..a722ce602 100644 --- a/src/pycram/datastructures/enums.py +++ b/src/pycram/datastructures/enums.py @@ -47,6 +47,8 @@ class Grasp(Enum): LEFT = 1 RIGHT = 2 TOP = 3 + BACK = 4 + BOTTOM = 5 class ObjectType(Enum): diff --git a/src/pycram/designators/action_designator.py b/src/pycram/designators/action_designator.py index f1f0bf875..662c3c4b7 100644 --- a/src/pycram/designators/action_designator.py +++ b/src/pycram/designators/action_designator.py @@ -11,16 +11,15 @@ from tf import transformations from typing_extensions import Any, List, Union, Callable, Optional, Type 
-import rospy - -from .location_designator import CostmapLocation +from .location_designator import CostmapLocation, AccessingLocation from .motion_designator import MoveJointsMotion, MoveGripperMotion, MoveArmJointsMotion, MoveTCPMotion, MoveMotion, \ LookingMotion, DetectingMotion, OpeningMotion, ClosingMotion from .object_designator import ObjectDesignatorDescription, BelieveObject, ObjectPart from .. import utils +from ..helper import calculate_object_faces, adjust_grasp_for_object_rotation, \ + calculate_grasp_offset from ..local_transformer import LocalTransformer from ..plan_failures import ObjectUnfetchable, ReachabilityFailure -# from ..robot_descriptions import robot_description from ..robot_description import RobotDescription from ..ros.viz_marker_publisher import AxisMarkerPublisher from ..tasktree import with_tree @@ -45,6 +44,8 @@ from ..orm.action_designator import Action as ORMAction from dataclasses import dataclass, field +from ..utils import translate_relative_to_object + class MoveTorsoAction(ActionDesignatorDescription): """ @@ -410,70 +411,158 @@ def ground(self) -> DetectActionPerformable: class OpenAction(ActionDesignatorDescription): """ - Opens a container like object + Opens a container-like object using a designated arm and navigation poses. - Can currently not be used + This class handles the action of moving a robot's arm to interact with and open + container-like objects. It specifies both the arm(s) used and the start and goal + locations necessary for positioning the robot during the action. """ - def __init__(self, object_designator_description: ObjectPart, arms: List[Arms], resolver=None, - ontology_concept_holders: Optional[List[Thing]] = None): + object_designator_description: ObjectPart + """ + Describes the specific part or handle of the object used for opening. + """ + + arms: List[Arms] + """ + A list of possible arms designated for the opening action. 
+ """ + + start_location: AccessingLocation.Location + """ + The start location for the navigation pose required to perform the action. + """ + + goal_location: Optional[AccessingLocation.Location] + """ + The goal location for the navigation pose, if different from the start location. + """ + + def __init__(self, object_designator_description: ObjectPart, arms: List[Arms], + start_goal_location: List[AccessingLocation.Location], + resolver=None, ontology_concept_holders: Optional[List[Thing]] = None): """ - Moves the arm of the robot to open a container. + Initializes the OpenAction with specified object, arm, and location parameters. - :param object_designator_description: Object designator describing the handle that should be used to open - :param arms: A list of possible arms that should be used - :param resolver: A alternative specialized_designators that returns a performable designator for the lists of possible parameter. - :param ontology_concept_holders: A list of ontology concepts that the action is categorized as or associated with + Args: + object_designator_description (ObjectPart): Describes the part or handle of + the object to be used for opening. + arms (List[Arms]): A list of arms available for the action. + start_goal_location (List[AccessingLocation.Location]): A list containing + the start and goal locations for the navigation poses required to perform + the action. If both entries are the same, only the start location is set. + resolver (optional): A specialized designator that returns a performable + designator for the list of possible parameters. + ontology_concept_holders (Optional[List[Thing]]): Ontology concepts categorizing + or associating the action with certain semantic meanings. 
""" super().__init__(resolver, ontology_concept_holders) self.object_designator_description: ObjectPart = object_designator_description self.arms: List[Arms] = arms + self.start_location: AccessingLocation.Location = start_goal_location[0] + self.goal_location: AccessingLocation.Location = None if start_goal_location[0] == start_goal_location[1] else \ + start_goal_location[1] if self.soma: self.init_ontology_concepts({"opening": self.soma.Opening}) def ground(self) -> OpenActionPerformable: """ - Default specialized_designators that returns a performable designator with the resolved object description and the first entries - from the lists of possible parameter. + Returns a performable designator for the `OpenAction`, using the resolved object + description and the primary entries from the provided parameter lists. - :return: A performable designator + This method creates an `OpenActionPerformable` instance based on the resolved + description of the target object, the first available arm, and the predefined + start and goal locations for executing the action. + + Returns: + OpenActionPerformable: An instance representing a performable designator for the + action, ready to be executed with the specified parameters. """ - return OpenActionPerformable(self.object_designator_description.resolve(), self.arms[0]) + return OpenActionPerformable( + self.object_designator_description.resolve(), + self.arms[0], + self.start_location, + self.goal_location + ) class CloseAction(ActionDesignatorDescription): """ - Closes a container like object. + Closes a container-like object using a designated arm and navigation poses. + + This class manages the action of moving a robot's arm to interact with and close + container-like objects. It specifies the arm(s) to use and the start and goal + locations required for positioning the robot during the action. 
+ """ - Can currently not be used + object_designator_description: ObjectPart + """ + Describes the specific part or handle of the object used for closing. + """ + + arms: List[Arms] + """ + A list of possible arms designated for the closing action. + """ + + start_location: AccessingLocation.Location + """ + The start location for the navigation pose required to perform the action. + """ + + goal_location: Optional[AccessingLocation.Location] + """ + The goal location for the navigation pose, if different from the start location. """ def __init__(self, object_designator_description: ObjectPart, arms: List[Arms], + start_goal_location: List[AccessingLocation.Location], resolver=None, ontology_concept_holders: Optional[List[Thing]] = None): """ - Attempts to close an open container + Initializes the CloseAction with specified object, arm, and location parameters. - :param object_designator_description: Object designator description of the handle that should be used - :param arms: A list of possible arms to use - :param resolver: An alternative specialized_designators that returns a performable designator for the list of possible parameter - :param ontology_concept_holders: A list of ontology concepts that the action is categorized as or associated with + Args: + object_designator_description (ObjectPart): Describes the part or handle of + the object to be used for closing. + arms (List[Arms]): A list of arms available for the action. + start_goal_location (List[AccessingLocation.Location]): A list containing + the start and goal locations for the navigation poses required to perform + the action. If both entries are the same, only the start location is set. + resolver (optional): A specialized designator that returns a performable + designator for the list of possible parameters. + ontology_concept_holders (Optional[List[Thing]]): Ontology concepts categorizing + or associating the action with certain semantic meanings. 
""" super().__init__(resolver, ontology_concept_holders) self.object_designator_description: ObjectPart = object_designator_description self.arms: List[Arms] = arms + self.start_location: AccessingLocation.Location = start_goal_location[0] + self.goal_location: AccessingLocation.Location = None if start_goal_location[0] == start_goal_location[1] else \ + start_goal_location[1] if self.soma: self.init_ontology_concepts({"closing": self.soma.Closing}) def ground(self) -> CloseActionPerformable: """ - Default specialized_designators that returns a performable designator with the resolved object designator and the first entry from - the list of possible arms. + Returns a performable designator for the `CloseAction`, using the resolved object + designator and the primary entry from the list of possible arms. - :return: A performable designator + This method creates a `CloseActionPerformable` instance based on the resolved + description of the target object, the first available arm, and the predefined + start and goal locations for executing the action. + + Returns: + CloseActionPerformable: An instance representing a performable designator for + the action, ready to be executed with the specified parameters. """ - return CloseActionPerformable(self.object_designator_description.resolve(), self.arms[0]) + return CloseActionPerformable( + self.object_designator_description.resolve(), + self.arms[0], + self.start_location, + self.goal_location + ) class GraspingAction(ActionDesignatorDescription): @@ -1077,116 +1166,108 @@ class PickUpActionPerformable(ActionAbstract): object_designator: ObjectDesignatorDescription.Object """ - Object designator describing the object that should be picked up + Object designator describing the object that should be picked up. """ arm: Arms """ - The arm that should be used for pick up + The arm enum that should be used for pick up, e.g., Arms.LEFT or Arms.RIGHT. """ grasp: Grasp """ - The grasp that should be used. 
For example, 'left' or 'right' + The grasp enum that should be used, e.g., Grasp.FRONT or Grasp.RIGHT. """ object_at_execution: Optional[ObjectDesignatorDescription.Object] = field(init=False) """ - The object at the time this Action got created. It is used to be a static, information holding entity. It is - not updated when the BulletWorld object is changed. + The object at the time this action was created. Acts as a static information holder and + is not updated when the BulletWorld object is changed. """ - orm_class: Type[ActionAbstract] = field(init=False, default=ORMPickUpAction) + orm_class: Type[ActionAbstract] = field(init=False, default=ORMPickUpAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the action to pick up the designated object with specified parameters, + moving the robot arm to the pre-pick position, grasping, and lifting the object. + """ # Store the object's data copy at execution self.object_at_execution = self.object_designator.frozen_copy() robot = World.robot # Retrieve object and robot from designators object = self.object_designator.world_object - # Get grasp orientation and target pose - grasp = RobotDescription.current_robot_description.get_arm_chain(self.arm).end_effector.grasps[self.grasp] # oTm = Object Pose in Frame map oTm = object.get_pose() - # Transform the object pose to the object frame, basically the origin of the object frame - mTo = object.local_transformer.transform_to_object_frame(oTm, object) - # Adjust the pose according to the special knowledge of the object designator - adjusted_pose = self.object_designator.special_knowledge_adjustment_pose(self.grasp, mTo) - # Transform the adjusted pose to the map frame - adjusted_oTm = object.local_transformer.transform_pose(adjusted_pose, "map") - # multiplying the orientation therefore "rotating" it, to get the correct orientation of the gripper - - adjusted_oTm.multiply_quaternions(grasp) - - # prepose depending on 
the gripper (its annoying we have to put pr2_1 here tbh - # gripper_frame = "pr2_1/l_gripper_tool_frame" if self.arm == "left" else "pr2_1/r_gripper_tool_frame" - gripper_frame = robot.get_link_tf_frame( - RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame()) - # First rotate the gripper, so the further calculations makes sense - tmp_for_rotate_pose = object.local_transformer.transform_pose(adjusted_oTm, gripper_frame) - tmp_for_rotate_pose.pose.position.x = 0 - tmp_for_rotate_pose.pose.position.y = 0 - tmp_for_rotate_pose.pose.position.z = -0.1 - gripper_rotate_pose = object.local_transformer.transform_pose(tmp_for_rotate_pose, "map") - - # Perform Gripper Rotate - # BulletWorld.current_bullet_world.add_vis_axis(gripper_rotate_pose) - # MoveTCPMotion(gripper_rotate_pose, self.arm).resolve().perform() - - oTg = object.local_transformer.transform_pose(adjusted_oTm, gripper_frame) - oTg.pose.position.x -= 0.1 # in x since this is how the gripper is oriented - prepose = object.local_transformer.transform_pose(oTg, "map") - - # Perform the motion with the prepose and open gripper - World.current_world.add_vis_axis(prepose) - - marker = AxisMarkerPublisher() - gripper_pose = World.robot.get_link_pose( - RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame()) - marker.publish([adjusted_oTm, gripper_pose], length=0.3) + # Adjust the pose according to the special knowledge of the object designator + # I think this should be irrelevant now due to the grasp_offset calculation, but not sure, so just commented out for now + # adjusted_pose = self.object_designator.special_knowledge_adjustment_pose(self.grasp, mTo) + adjusted_grasp = adjust_grasp_for_object_rotation(oTm, self.grasp, self.arm) + adjusted_oTm = oTm.copy() + adjusted_oTm.set_orientation(adjusted_grasp) + + grasp_offset = calculate_grasp_offset(object.get_object_dimensions(), self.arm, self.grasp) + palm_axis = 
RobotDescription.current_robot_description.get_palm_axis() + adjusted_oTm_grasp_pose = translate_relative_to_object(adjusted_oTm, palm_axis, grasp_offset) + + translation_value = 0.1 # hardcoded value for now + oTm_prepose = translate_relative_to_object(adjusted_oTm_grasp_pose, palm_axis, translation_value) + prepose = object.local_transformer.transform_pose(oTm_prepose, "map") + + if World.current_world.allow_publish_debug_poses: + gripper_pose = World.robot.get_link_pose( + RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame()) + marker = AxisMarkerPublisher() + marker.publish([adjusted_oTm_grasp_pose, prepose, gripper_pose], length=0.3) MoveTCPMotion(prepose, self.arm, allow_gripper_collision=True).perform() MoveGripperMotion(motion=GripperState.OPEN, gripper=self.arm).perform() - # Perform the motion with the adjusted pose -> actual grasp and close gripper - World.current_world.add_vis_axis(adjusted_oTm) - MoveTCPMotion(adjusted_oTm, self.arm, allow_gripper_collision=True).perform() - adjusted_oTm.pose.position.z += 0.03 + MoveTCPMotion(adjusted_oTm_grasp_pose, self.arm, allow_gripper_collision=True).perform() MoveGripperMotion(motion=GripperState.CLOSE, gripper=self.arm).perform() tool_frame = RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame() robot.attach(object, tool_frame) - # Lift object - World.current_world.add_vis_axis(adjusted_oTm) - MoveTCPMotion(adjusted_oTm, self.arm, allow_gripper_collision=True).perform() - - # Remove the vis axis from the world - World.current_world.remove_vis_axis() + adjusted_oTm_grasp_pose.pose.position.z += 0.03 + MoveTCPMotion(adjusted_oTm_grasp_pose, self.arm, allow_gripper_collision=True).perform() @dataclass class PlaceActionPerformable(ActionAbstract): """ - Places an Object at a position using an arm. + Places an object at a specified position using an arm. 
""" object_designator: ObjectDesignatorDescription.Object """ - Object designator describing the object that should be place + Describes the object that should be placed. """ + arm: Arms """ - Arm that is currently holding the object + The arm currently holding the object. """ + target_location: Pose """ - Pose in the world at which the object should be placed + The world pose at which the object should be placed. """ + orm_class: Type[ActionAbstract] = field(init=False, default=ORMPlaceAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the place action, positioning the arm to the specified target location, + releasing the object, and retracting the arm. + """ object_pose = self.object_designator.world_object.get_pose() local_tf = LocalTransformer() @@ -1198,12 +1279,23 @@ def perform(self) -> None: target_diff = self.target_location.to_transform("target").inverse_times( tcp_to_object.to_transform("object")).to_pose() + if World.current_world.allow_publish_debug_poses: + gripper_pose = World.robot.get_link_pose( + RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame()) + marker = AxisMarkerPublisher() + marker.publish([target_diff, self.target_location, gripper_pose], length=0.3) + MoveTCPMotion(target_diff, self.arm).perform() MoveGripperMotion(GripperState.OPEN, self.arm).perform() World.robot.detach(self.object_designator.world_object) + retract_pose = local_tf.transform_pose(target_diff, World.robot.get_link_tf_frame( RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame())) - retract_pose.position.x -= 0.07 + + palm_axis = RobotDescription.current_robot_description.get_palm_axis() + translation_value = 0.1 + retract_pose = translate_relative_to_object(retract_pose, palm_axis, translation_value) + MoveTCPMotion(retract_pose, self.arm).perform() @@ -1227,54 +1319,84 @@ def perform(self) -> None: @dataclass class 
TransportActionPerformable(ActionAbstract): """ - Transports an object to a position using an arm + Transports an object to a specified position using an arm. """ object_designator: ObjectDesignatorDescription.Object """ - Object designator describing the object that should be transported. + Describes the object that should be transported. """ + arm: Arms """ - Arm that should be used + The arm designated for transporting the object. """ + target_location: Pose """ - Target Location to which the object should be transported + The target world pose to which the object should be transported. """ + orm_class: Type[ActionAbstract] = field(init=False, default=ORMTransportAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the transport action by navigating to the pick-up and target locations + to grasp, transport, and place the designated object. + + The process includes: + 1. Parking both arms for initial positioning. + 2. Determining the appropriate grasp based on object type. + 3. Finding a reachable pose for the robot to pick up the object with the designated arm. + 4. Navigating to the pick-up pose, performing the pick-up, and then parking arms again. + 5. Resolving a place location reachable by the robot to place the object. + 6. Navigating to the place location, performing the placement, and parking arms post-transport. + + Raises: + ObjectUnfetchable: If no reachable pose is found for the pick-up. + ReachabilityFailure: If no reachable location is found for the target location. 
+ """ robot_desig = BelieveObject(names=[RobotDescription.current_robot_description.name]) ParkArmsActionPerformable(Arms.BOTH).perform() if self.object_designator.obj_type == ObjectType.BOWL or self.object_designator.obj_type == ObjectType.SPOON: - grasp = Grasp.TOP + grasp = calculate_object_faces(self.object_designator)[1] else: - grasp = Grasp.FRONT + grasp = calculate_object_faces(self.object_designator)[0] + + pickup_loc = CostmapLocation( + target=self.object_designator, + reachable_for=robot_desig.resolve(), + reachable_arm=self.arm, + used_grasps=[grasp] + ) - pickup_loc = CostmapLocation(target=self.object_designator, reachable_for=robot_desig.resolve(), - reachable_arm=self.arm, used_grasps=[grasp]) # Tries to find a pick-up posotion for the robot that uses the given arm - pickup_pose = None - for pose in pickup_loc: - if self.arm in pose.reachable_arms: - pickup_pose = pose - break + pickup_pose = next((pose for pose in pickup_loc if self.arm in pose.reachable_arms), None) if not pickup_pose: raise ObjectUnfetchable( - f"Found no pose for the robot to grasp the object: {self.object_designator} with arm: {self.arm}") + f"No reachable pose found for the robot to grasp the object: {self.object_designator} with arm: {self.arm}" + ) NavigateActionPerformable(pickup_pose.pose).perform() PickUpActionPerformable(self.object_designator, self.arm, grasp).perform() ParkArmsActionPerformable(Arms.BOTH).perform() try: - place_loc = CostmapLocation(target=self.target_location, reachable_for=robot_desig.resolve(), - reachable_arm=self.arm, used_grasps=[grasp]).resolve() + place_loc = CostmapLocation( + target=self.target_location, + reachable_for=robot_desig.resolve(), + reachable_arm=self.arm, + used_grasps=[grasp], + object_in_hand=self.object_designator + ).resolve() except StopIteration: raise ReachabilityFailure( - f"No location found from where the robot can reach the target location: {self.target_location}") + f"No reachable location found for the target 
location: {self.target_location}" + ) NavigateActionPerformable(place_loc.pose).perform() PlaceActionPerformable(self.object_designator, self.arm, self.target_location).perform() ParkArmsActionPerformable(Arms.BOTH).perform() @@ -1322,23 +1444,50 @@ def perform(self) -> None: @dataclass class OpenActionPerformable(ActionAbstract): """ - Opens a container like object + Opens a container-like object using a specified arm. """ object_designator: ObjectPart.Object """ - Object designator describing the object that should be opened + Describes the object that should be opened. """ + arm: Arms """ - Arm that should be used for opening the container + The arm designated for opening the container. + """ + + start_location: AccessingLocation.Location + """ + The initial location from which the opening action begins. + """ + + goal_location: Optional[AccessingLocation.Location] + """ + The final goal location for the opening action. """ + orm_class: Type[ActionAbstract] = field(init=False, default=ORMOpenAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the open action by navigating to the start location, performing a grasp, + and executing the opening motion to the goal location. + + The process includes: + 1. Navigating to the specified `start_location`. + 2. Performing the grasp action on the designated object. + 3. Executing the opening motion towards the `goal_location`. + 4. Releasing the gripper to complete the opening. 
+ + """ + NavigateAction([self.start_location.pose]).resolve().perform() GraspingActionPerformable(self.arm, self.object_designator).perform() - OpeningMotion(self.object_designator, self.arm).perform() + OpeningMotion(self.object_designator, self.arm, self.goal_location).perform() MoveGripperMotion(GripperState.OPEN, self.arm, allow_gripper_collision=True).perform() @@ -1346,23 +1495,50 @@ def perform(self) -> None: @dataclass class CloseActionPerformable(ActionAbstract): """ - Closes a container like object. + Closes a container-like object using a specified arm. """ object_designator: ObjectPart.Object """ - Object designator describing the object that should be closed + Describes the object that should be closed. """ + arm: Arms """ - Arm that should be used for closing + The arm designated for closing the container. + """ + + start_location: AccessingLocation.Location + """ + The initial location from which the closing action begins. + """ + + goal_location: Optional[AccessingLocation.Location] + """ + The final goal location for the closing action. """ + orm_class: Type[ActionAbstract] = field(init=False, default=ORMCloseAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the close action by navigating to the start location, performing a grasp, + and executing the closing motion to the goal location. + + The process includes: + 1. Navigating to the specified `start_location`. + 2. Performing the grasp action on the designated object. + 3. Executing the closing motion towards the `goal_location`. + 4. Releasing the gripper to complete the closing. 
+ + """ + NavigateAction([self.start_location.pose]).resolve().perform() GraspingActionPerformable(self.arm, self.object_designator).perform() - ClosingMotion(self.object_designator, self.arm).perform() + ClosingMotion(self.object_designator, self.arm, self.goal_location).perform() MoveGripperMotion(GripperState.OPEN, self.arm, allow_gripper_collision=True).perform() @@ -1370,46 +1546,64 @@ def perform(self) -> None: @dataclass class GraspingActionPerformable(ActionAbstract): """ - Grasps an object described by the given Object Designator description + Grasps an object as described by the given object designator. """ + arm: Arms """ - The arm that should be used to grasp + The arm to be used for grasping the object. """ + object_desig: Union[ObjectDesignatorDescription.Object, ObjectPart.Object] """ - Object Designator for the object that should be grasped + Object designator for the object that should be grasped. """ + orm_class: Type[ActionAbstract] = field(init=False, default=ORMGraspingAction) + """ + ORM class type for this action. + """ @with_tree def perform(self) -> None: + """ + Executes the grasp action by navigating to a pre-grasp position and grasping the object. + + The sequence includes: + 1. Determining the object's pose and adjusting the grasp orientation. + 2. Translating to a pre-grasp position. + 3. Moving the robot's arm to the pre-grasp position and opening the gripper. + 4. Moving to the grasp position and closing the gripper to secure the object. + + Notes: + - The grasp side is currently hardcoded to `Grasp.FRONT`. + - This approach adjusts the pose orientation to align with the grasp angle. 
+ """ if isinstance(self.object_desig, ObjectPart.Object): object_pose = self.object_desig.part_pose else: object_pose = self.object_desig.world_object.get_pose() - if RobotDescription.current_robot_description.name == "tiago_dual": - object_pose = object_pose.copy() - object_pose.set_orientation([0, 0, 0, 1]) + # TODO: there is a difference in which side faces the object during costmap and execution, so hardcoded for now, fix it + # grasp = calculate_object_faces(self.object_desig)[0] + grasp = Grasp.FRONT - grasp = RobotDescription.current_robot_description.get_arm_chain(self.arm).end_effector.grasps[Grasp.FRONT] - object_pose.multiply_quaternions(grasp) + adjusted_grasp = adjust_grasp_for_object_rotation(object_pose, grasp, self.arm) + object_pose = object_pose.copy() + object_pose.set_orientation(adjusted_grasp) - lt = LocalTransformer() - gripper_name = RobotDescription.current_robot_description.get_arm_chain(self.arm).get_tool_frame() - - object_pose_in_gripper = lt.transform_pose(object_pose, - World.robot.get_link_tf_frame(gripper_name)) + palm_axis = RobotDescription.current_robot_description.get_palm_axis() + translation_value = 0.05 + local_transformer = LocalTransformer() + oTm_prepose = translate_relative_to_object(object_pose, palm_axis, translation_value) - marker = AxisMarkerPublisher() - gripper_pose = World.robot.get_link_pose(gripper_name) - pre_grasp = object_pose_in_gripper.copy() - # pre_grasp.pose.position.x -= 0.1 + prepose = local_transformer.transform_pose(oTm_prepose, "map") - # marker.publish([object_pose, gripper_pose], name="Grasping", length=0.3) + if World.current_world.allow_publish_debug_poses: + marker = AxisMarkerPublisher() + marker.publish([object_pose, prepose], name="Grasping", length=0.3) - MoveTCPMotion(pre_grasp, self.arm).perform() + MoveTCPMotion(prepose, self.arm).perform() MoveGripperMotion(GripperState.OPEN, self.arm).perform() MoveTCPMotion(object_pose, self.arm, allow_gripper_collision=True).perform() diff --git 
a/src/pycram/designators/location_designator.py b/src/pycram/designators/location_designator.py index 32b1e02c4..687322aef 100644 --- a/src/pycram/designators/location_designator.py +++ b/src/pycram/designators/location_designator.py @@ -2,16 +2,18 @@ import time from enum import Enum -from tqdm import tqdm -from typing_extensions import List, Union, Iterable, Optional, Callable +from typing_extensions import List, Union, Iterable, Optional, Callable, Tuple from .object_designator import ObjectDesignatorDescription, ObjectPart from ..datastructures.world import World, UseProspectionWorld +from ..helper import adjust_grasp_for_object_rotation, calculate_object_faces, calculate_grasp_offset from ..local_transformer import LocalTransformer from ..ros.viz_marker_publisher import AxisMarkerPublisher +from ..utils import translate_relative_to_object from ..world_reasoning import link_pose_for_joint_config from ..designator import DesignatorError, LocationDesignatorDescription -from ..costmaps import OccupancyCostmap, VisibilityCostmap, SemanticCostmap, GaussianCostmap +from ..costmaps import OccupancyCostmap, VisibilityCostmap, SemanticCostmap, GaussianCostmap, \ + DirectionalCostmap from ..datastructures.enums import JointType, Arms, Grasp from ..pose_generator_and_validator import PoseGenerator, visibility_validator, reachability_validator from ..robot_description import RobotDescription @@ -105,30 +107,37 @@ def __iter__(self) -> Iterable[Location]: class CostmapLocation(LocationDesignatorDescription): """ - Uses Costmaps to create locations for complex constrains + Uses costmaps to create locations based on complex constraints, such as reachability and visibility. 
""" @dataclasses.dataclass class Location(LocationDesignatorDescription.Location): reachable_arms: List[Arms] """ - List of arms with which the pose can be reached, is only used when the 'rechable_for' parameter is used + List of arms with which the pose can be reached; only relevant when the `reachable_for` parameter is specified. """ def __init__(self, target: Union[Pose, ObjectDesignatorDescription.Object], reachable_for: Optional[ObjectDesignatorDescription.Object] = None, visible_for: Optional[ObjectDesignatorDescription.Object] = None, reachable_arm: Optional[Arms] = None, resolver: Optional[Callable] = None, - used_grasps: Optional[List[Enum]] = None): - """ - Location designator that uses costmaps as base to calculate locations for complex constrains like reachable or - visible. In case of reachable the resolved location contains a list of arms with which the location is reachable. - - :param target: Location for which visibility or reachability should be calculated - :param reachable_for: Object for which the reachability should be calculated, usually a robot - :param visible_for: Object for which the visibility should be calculated, usually a robot - :param reachable_arm: An optional arm with which the target should be reached - :param resolver: An alternative specialized_designators that returns a resolved location for the given input of this description + used_grasps: Optional[List[Enum]] = None, + object_in_hand: Optional[ObjectDesignatorDescription.Object] = None): + """ + Initializes a location designator that uses costmaps to calculate locations based on complex constraints, + such as reachability and visibility. + + Args: + target (Union[Pose, ObjectDesignatorDescription.Object]): The location for which visibility or reachability + should be calculated. + reachable_for (Optional[ObjectDesignatorDescription.Object]): The object (usually a robot) for which + reachability is calculated. 
+ visible_for (Optional[ObjectDesignatorDescription.Object]): The object (usually a robot) for which + visibility is calculated. + reachable_arm (Optional[Arms]): An optional arm with which the target should be reached. + resolver (Optional[Callable]): An alternative function that resolves a location for the input of this description. + used_grasps (Optional[List[Enum]]): A list of grasps to use to reach the target. + object_in_hand (Optional[ObjectDesignatorDescription.Object]): The object currently held by the robot, if any. """ super().__init__(resolver) self.target: Union[Pose, ObjectDesignatorDescription.Object] = target @@ -136,6 +145,7 @@ def __init__(self, target: Union[Pose, ObjectDesignatorDescription.Object], self.visible_for: ObjectDesignatorDescription.Object = visible_for self.reachable_arm: Optional[Arms] = reachable_arm self.used_grasps: Optional[List[Enum]] = used_grasps + self.object_in_hand: Optional[ObjectDesignatorDescription.Object] = object_in_hand def ground(self) -> Location: """ @@ -147,17 +157,18 @@ def ground(self) -> Location: def __iter__(self): """ - Generates positions for a given set of constrains from a costmap and returns - them. The generation is based of a costmap which itself is the product of - merging costmaps, each for a different purpose. In any case an occupancy costmap - is used as the base, then according to the given constrains a visibility or - gaussian costmap is also merged with this. Once the costmaps are merged, - a generator generates pose candidates from the costmap. Each pose candidate - is then validated against the constraints given by the designator if all validators - pass the pose is considered valid and yielded. - - :yield: An instance of CostmapLocation.Location with a valid position that satisfies the given constraints - """ + Generates valid positions based on constraints derived from a costmap, yielding each + that satisfies the given requirements. 
+ + This method merges multiple costmaps, each with a unique purpose (e.g., occupancy, + visibility, reachability) to form a comprehensive costmap. From this combined map, + candidate poses are generated and validated against constraints defined in the designator. + Each pose that meets the specified constraints is yielded as a valid location. + + Yields: + CostmapLocation.Location: A location instance with a valid position that satisfies + the constraints, including reachability and visibility as applicable. + """ min_height = RobotDescription.current_robot_description.get_default_camera().minimal_height max_height = RobotDescription.current_robot_description.get_default_camera().maximal_height # This ensures that the costmaps always get a position as their origin. @@ -166,56 +177,79 @@ def __iter__(self): else: target_pose = self.target.copy() - # ground_pose = [[target_pose[0][0], target_pose[0][1], 0], target_pose[1]] ground_pose = Pose(target_pose.position_as_list()) ground_pose.position.z = 0 distance_to_obstacle = RobotDescription.current_robot_description.get_costmap_offset() - if self.used_grasps: - if self.used_grasps[0] == Grasp.TOP: - if RobotDescription.current_robot_description.name == "tiago_dual": - distance_to_obstacle = 0.05 - elif RobotDescription.current_robot_description.name == "pr2": - distance_to_obstacle = 0.15 - occupancy = OccupancyCostmap(distance_to_obstacle, False, 200, 0.02, ground_pose) - final_map = occupancy + max_reach = RobotDescription.current_robot_description.get_max_reach() + map_size: int = int(max_reach * 100 * 3) + map_resolution = 0.15 + + occupancy = OccupancyCostmap(distance_to_obstacle, False, map_size * 2, map_resolution, ground_pose) + final_map = occupancy if self.reachable_for: - gaussian = GaussianCostmap(200, 15, 0.02, ground_pose) + distance = (distance_to_obstacle + max_reach) / 2 + gaussian = GaussianCostmap(map_size, 1.5, map_resolution, ground_pose, True, distance) final_map += gaussian if self.visible_for: 
- visible = VisibilityCostmap(min_height, max_height, 200, 0.02, Pose(target_pose.position_as_list())) + visible = VisibilityCostmap(min_height, max_height, map_size, map_resolution, + Pose(target_pose.position_as_list())) final_map += visible + directional = DirectionalCostmap(map_size * 3, self.used_grasps[0], map_resolution, target_pose, + self.object_in_hand is not None) + final_map *= directional + + if final_map.world.allow_publish_debug_poses: + directional.publish() + time.sleep(1) + final_map.publish(weighted=True) + if self.visible_for or self.reachable_for: robot_object = self.visible_for.world_object if self.visible_for else self.reachable_for.world_object test_robot = World.current_world.get_prospection_object_for_object(robot_object) - gripper_pose = None - if RobotDescription.current_robot_description.name == "tiago_dual": - if self.used_grasps: - grasp = \ - RobotDescription.current_robot_description.get_arm_chain(self.reachable_arm).end_effector.grasps[ - self.used_grasps[0]] - target_pose = target_pose.copy() - target_pose.multiply_quaternions(grasp) - - gripper_pose = World.robot.get_link_pose( - RobotDescription.current_robot_description.get_arm_chain(self.reachable_arm).get_tool_frame()) - - # final_map.visualize() + + if self.object_in_hand: + local_transformer = LocalTransformer() + object_pose = self.object_in_hand.world_object.get_pose() + target_pose = self.target.copy() + tcp_to_object = local_transformer.transform_pose(object_pose, + World.robot.get_link_tf_frame( + RobotDescription.current_robot_description.get_arm_chain( + self.reachable_arm).get_tool_frame())) + + target_pose = target_pose.to_transform("target").inverse_times( + tcp_to_object.to_transform("object")).to_pose() + else: + adjusted_grasp = adjust_grasp_for_object_rotation(target_pose, self.used_grasps[0], self.reachable_arm) + target_pose = target_pose.copy() + target_pose.set_orientation(adjusted_grasp) + grasp_offset = 
calculate_grasp_offset(self.target.world_object.get_object_dimensions(), self.reachable_arm, + self.used_grasps[0]) + + palm_axis = RobotDescription.current_robot_description.get_palm_axis() + + target_pose = translate_relative_to_object(target_pose, palm_axis, grasp_offset) + with UseProspectionWorld(): for maybe_pose in PoseGenerator(final_map, number_of_samples=600): - if gripper_pose: + if final_map.world.allow_publish_debug_poses: + gripper_pose = World.robot.get_link_pose( + RobotDescription.current_robot_description.get_arm_chain(self.reachable_arm).get_tool_frame()) marker = AxisMarkerPublisher() - marker.publish([maybe_pose, target_pose, gripper_pose], length=0.5) + marker.publish([maybe_pose, target_pose, gripper_pose], length=0.3) res = True arms = None + if self.visible_for: res = res and visibility_validator(maybe_pose, test_robot, target_pose, World.current_world) if self.reachable_for: - hand_links = [] - for description in RobotDescription.current_robot_description.get_manipulator_chains(): - hand_links += description.end_effector.links + hand_links = [ + link + for description in RobotDescription.current_robot_description.get_manipulator_chains() + for link in description.end_effector.links + ] valid, arms = reachability_validator(maybe_pose, test_robot, target_pose, allowed_collision={test_robot: hand_links}) if self.reachable_arm: @@ -228,24 +262,29 @@ def __iter__(self): class AccessingLocation(LocationDesignatorDescription): """ - Location designator which describes poses used for opening drawers + Designates a location for accessing and opening drawers. + + This designator provides the robot with a pose for interacting with drawer handles. + Calculating this position before the drawer is opened is recommended to avoid + potential issues with pose estimation. 
""" @dataclasses.dataclass class Location(LocationDesignatorDescription.Location): arms: List[Arms] """ - List of arms that can be used to for accessing from this pose + List of arms that can be used for accessing from this pose. """ def __init__(self, handle_desig: ObjectPart.Object, robot_desig: ObjectDesignatorDescription.Object, resolver=None): """ - Describes a position from where a drawer can be opened. For now this position should be calculated before the - drawer will be opened. Calculating the pose while the drawer is open could lead to problems. + Initializes a location designator for accessing a drawer handle. + + Args: + handle_desig (ObjectPart.Object): The designator for the drawer handle to be accessed. + robot_desig (ObjectDesignatorDescription.Object): The designator for the robot that will perform the action. + resolver (Optional[Callable]): An optional custom resolver function for location creation. - :param handle_desig: ObjectPart designator for handle of the drawer - :param robot: Object designator for the robot which should open the drawer - :param resolver: An alternative specialized_designators to create the location """ super().__init__(resolver) self.handle: ObjectPart.Object = handle_desig @@ -259,33 +298,31 @@ def ground(self) -> Location: """ return next(iter(self)) - def __iter__(self) -> Location: + def __iter__(self) -> Tuple[Location, Location]: """ - Creates poses from which the robot can open the drawer specified by the ObjectPart designator describing the - handle. Poses are validated by checking if the robot can grasp the handle while the drawer is closed and if - the handle can be grasped if the drawer is open. + Generates poses for the robot to access and open a drawer specified by the handle + designator. Poses are validated to ensure the robot can grasp the handle while the drawer + is closed and that the handle can still be grasped when the drawer is open. 
+ + The process involves generating candidate poses using a merged costmap that incorporates + occupancy, Gaussian, and directional costmaps. For each candidate pose, reachability + validation checks are performed for both the initial and goal (fully opened) positions + of the drawer. Only poses that satisfy all constraints are yielded. - :yield: A location designator containing the pose and the arms that can be used. + Yields: + Tuple[AccessingLocation.Location, AccessingLocation.Location]: A tuple containing + two location designators, one for the initial and one for the goal pose, with + the arms that can be used for each. """ - # ground_pose = [[self.handle.part_pose[0][0], self.handle.part_pose[0][1], 0], self.handle.part_pose[1]] ground_pose = Pose(self.handle.part_pose.position_as_list()) ground_pose.position.z = 0 - distance_to_obstacle = RobotDescription.current_robot_description.get_costmap_offset() - - if self.robot.name == "tiago_dual": - distance_to_obstacle = 0.4 - - occupancy = OccupancyCostmap(distance_to_obstacle=distance_to_obstacle, from_ros=False, size=200, - resolution=0.02, - origin=ground_pose) - gaussian = GaussianCostmap(200, 15, 0.02, ground_pose) - - final_map = occupancy + gaussian - test_robot = World.current_world.get_prospection_object_for_object(self.robot) # Find a Joint of type prismatic which is above the handle in the URDF tree - container_joint = self.handle.world_object.find_joint_above_link(self.handle.name, JointType.PRISMATIC) + if self.handle.name == "handle_cab3_door_top": + container_joint = self.handle.world_object.find_joint_above_link(self.handle.name, JointType.REVOLUTE) + else: + container_joint = self.handle.world_object.find_joint_above_link(self.handle.name, JointType.PRISMATIC) init_pose = link_pose_for_joint_config(self.handle.world_object, { container_joint: self.handle.world_object.get_joint_limits(container_joint)[0]}, @@ -301,34 +338,74 @@ def __iter__(self) -> Location: container_joint: 
self.handle.world_object.get_joint_limits(container_joint)[1] / 1.5}, self.handle.name) - if self.robot.name == "tiago_dual": - init_pose.set_orientation([0, 0, 0, 1]) - goal_pose.set_orientation([0, 0, 0, 1]) - half_pose.set_orientation([0, 0, 0, 1]) + grasp = calculate_object_faces(self.handle)[0] + grasp = Grasp.FRONT + original_init_pose = init_pose.copy() + init_pose, half_pose, goal_pose = init_pose.copy(), half_pose.copy(), goal_pose.copy() - grasp = RobotDescription.current_robot_description.get_arm_chain(Arms.RIGHT).end_effector.grasps[ - Grasp.FRONT] - init_pose.multiply_quaternions(grasp) - goal_pose.multiply_quaternions(grasp) - half_pose.multiply_quaternions(grasp) + adjusted_init_pose_grasp = adjust_grasp_for_object_rotation(init_pose, grasp, Arms.LEFT) + adjusted_half_pose_grasp = adjust_grasp_for_object_rotation(half_pose, grasp, Arms.LEFT) + adjusted_goal_pose_grasp = adjust_grasp_for_object_rotation(goal_pose, grasp, Arms.LEFT) - with UseProspectionWorld(): - for maybe_pose in PoseGenerator(final_map, number_of_samples=600, - orientation_generator=lambda p, o: PoseGenerator.generate_orientation(p, - half_pose)): + init_pose.set_orientation(adjusted_init_pose_grasp) + half_pose.set_orientation(adjusted_half_pose_grasp) + goal_pose.set_orientation(adjusted_goal_pose_grasp) - hand_links = [] - for description in RobotDescription.current_robot_description.get_manipulator_chains(): - hand_links += description.links + distance_to_obstacle = RobotDescription.current_robot_description.get_costmap_offset() + max_reach = RobotDescription.current_robot_description.get_max_reach() + map_size: int = int(max_reach * 100 * 2.5) + map_resolution = 0.15 + + # TODO: find better strategy for distance_to_obstacle + occupancy = OccupancyCostmap(distance_to_obstacle, False, map_size * 2, map_resolution, ground_pose) + distance = (distance_to_obstacle + max_reach) / 2 + gaussian = GaussianCostmap(map_size, 1.5, map_resolution, ground_pose, True, distance) + final_map = 
occupancy + gaussian - valid_init, arms_init = reachability_validator(maybe_pose, test_robot, init_pose, - allowed_collision={test_robot: hand_links}) + directional = DirectionalCostmap(map_size*3, Grasp.FRONT, map_resolution, original_init_pose) + final_map *= directional - valid_goal, arms_goal = reachability_validator(maybe_pose, test_robot, goal_pose, - allowed_collision={test_robot: hand_links}) + if final_map.world.allow_publish_debug_poses: + final_map.publish(weighted=True) + + with (UseProspectionWorld()): + for init_maybe_pose in PoseGenerator(final_map, number_of_samples=600, + orientation_generator=lambda p, o: PoseGenerator.generate_orientation( + p, + half_pose)): + if final_map.world.allow_publish_debug_poses: + marker = AxisMarkerPublisher() + marker.publish([init_pose, half_pose, goal_pose, init_maybe_pose], length=0.5) + + hand_links = [ + link + for description in RobotDescription.current_robot_description.get_manipulator_chains() + for link in description.end_effector.links + ] + + valid_init, arms_init = reachability_validator(init_maybe_pose, test_robot, init_pose, + allowed_collision={test_robot: hand_links}, translation_value=0.05) + if valid_init: + valid_goal, arms_goal = reachability_validator(init_maybe_pose, test_robot, goal_pose, + allowed_collision={test_robot: hand_links}, translation_value=0.05) + goal_maybe_pose = init_maybe_pose.copy() + + if not valid_goal: + mapThandle = init_pose.to_transform("init_handle") + mapTmaybe = init_maybe_pose.to_transform("init_maybe") + handleTmaybe = mapThandle.invert() * mapTmaybe + goal_maybe_pose = (goal_pose.to_transform("goal_handle") * handleTmaybe).to_pose() + + if final_map.world.allow_publish_debug_poses: + marker = AxisMarkerPublisher() + marker.publish([goal_maybe_pose], length=0.5) + + valid_goal, arms_goal = reachability_validator(goal_maybe_pose, test_robot, goal_pose, + allowed_collision={test_robot: hand_links}) if valid_init and valid_goal and not 
set(arms_init).isdisjoint(set(arms_goal)): - yield self.Location(maybe_pose, list(set(arms_init).intersection(set(arms_goal)))) + yield self.Location(init_maybe_pose, list(set(arms_init).intersection(set(arms_goal)))), \ + self.Location(goal_maybe_pose, list(set(arms_init).intersection(set(arms_goal)))) class SemanticCostmapLocation(LocationDesignatorDescription): diff --git a/src/pycram/designators/motion_designator.py b/src/pycram/designators/motion_designator.py index 5e1174edd..74f4919b2 100644 --- a/src/pycram/designators/motion_designator.py +++ b/src/pycram/designators/motion_designator.py @@ -2,6 +2,8 @@ from dataclasses import dataclass from sqlalchemy.orm import Session + +from .location_designator import AccessingLocation from .object_designator import ObjectDesignatorDescription, ObjectPart, RealObject from ..designator import ResolutionError from ..orm.base import ProcessMetaData @@ -266,6 +268,10 @@ class OpeningMotion(BaseMotion): """ Arm that should be used """ + goal_location: Optional[AccessingLocation.Location] + """ + The goal location for the opening + """ @with_tree def perform(self): @@ -298,6 +304,10 @@ class ClosingMotion(BaseMotion): """ Arm that should be used """ + goal_location: Optional[AccessingLocation.Location] + """ + The goal location for the closing + """ @with_tree def perform(self): diff --git a/src/pycram/helper.py b/src/pycram/helper.py index 73cf77dbc..fe936b649 100644 --- a/src/pycram/helper.py +++ b/src/pycram/helper.py @@ -1,8 +1,20 @@ +import numpy as np + +from pycram.datastructures.enums import Grasp +from pycram.local_transformer import LocalTransformer +from pycram.robot_description import RobotDescription +from scipy.spatial.transform import Rotation as R +from typing_extensions import List + +from pycram.ros.viz_marker_publisher import AxisMarkerPublisher + """Implementation of helper functions and classes for internal usage only. 
Classes: Singleton -- implementation of singleton metaclass """ + + class Singleton(type): """ Metaclass for singletons @@ -16,4 +28,136 @@ class Singleton(type): def __call__(cls, *args, **kwargs): if cls not in cls._instances: cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] \ No newline at end of file + return cls._instances[cls] + + +INDEX_TO_AXIS = {0: 'x', 1: 'y', 2: 'z'} +AXIS_TO_INDEX = {'x': 0, 'y': 1, 'z': 2} +AXIS_INDEX_TO_FACE = { + ('x', -1): Grasp.FRONT, + ('x', 1): Grasp.BACK, + ('y', 1): Grasp.LEFT, + ('y', -1): Grasp.RIGHT, + ('z', -1): Grasp.TOP, + ('z', 1): Grasp.BOTTOM +} + +FACE_TO_AXIS_INDEX = { + Grasp.FRONT: ('x', -1), + Grasp.BACK: ('x', 1), + Grasp.LEFT: ('y', 1), + Grasp.RIGHT: ('y', -1), + Grasp.TOP: ('z', -1), + Grasp.BOTTOM: ('z', 1) +} + + +def calculate_vector_face(vector: List): + """ + Determines the face of the object based on the input vector. + + Args: + vector (List): A 3D vector representing one of the robot's axes in the object's frame. + + Returns: + str: The corresponding face of the object. + """ + max_index = np.argmax(np.abs(vector)) + max_sign = int(np.sign(vector[max_index])) + axis = INDEX_TO_AXIS[max_index] + + return AXIS_INDEX_TO_FACE[(axis, max_sign)] + + +def calculate_object_faces(object): + """ + Calculates the faces of an object relative to the robot based on orientation. + + This method determines the face of the object that is directed towards the robot, + as well as the bottom face, by calculating vectors aligned with the robot's negative x-axis + and negative z-axis in the object's frame. + + Args: + object (Object): The object whose faces are to be calculated, with an accessible pose attribute. + + Returns: + list: A list containing two Grasp Enums, where the first element is the face of the object facing the robot, + and the second element is the top or bottom face of the object. 
+ """ + local_transformer = LocalTransformer() + oTm = object.pose + + base_link = RobotDescription.current_robot_description.base_link + marker = AxisMarkerPublisher() + base_link_pose = object.world_object.world.robot.get_link_pose(base_link) + + marker.publish([base_link_pose]) + + oTb = local_transformer.transform_pose(oTm, object.world_object.world.robot.get_link_tf_frame(base_link)) + orientation = oTb.orientation_as_list() + + rotation_matrix = R.from_quat([orientation[0], orientation[1], orientation[2], orientation[3]]).inv().as_matrix() + + robot_negative_x_vector = -rotation_matrix[:, 0] + robot_negative_z_vector = -rotation_matrix[:, 2] + + facing_robot_face = calculate_vector_face(robot_negative_x_vector) + bottom_face = calculate_vector_face(robot_negative_z_vector) + + return [facing_robot_face, bottom_face] + + +def calculate_grasp_offset(object_dim: List, arm, grasp): + """ + Calculates the grasp offset of an object based on its dimensions and the desired grasp type. + + This method adjusts the object's position along the specified axis to account for grasping + constraints, based on the arm's tool frame offset and the object's half-dimensions. + + Args: + object (Object): The object to be grasped, with a pose attribute that includes position. + object_dim (List[float]): Dimensions of the object in each axis [x, y, z]. + arm (Enum): The arm used for grasping, needed to determine tool frame offset. + grasp (Enum): The desired grasp type, which determines the grasp axis and direction. + + Returns: + offset: Translation offset of the object for grasping. 
+ """ + axis, _ = FACE_TO_AXIS_INDEX[grasp] + + object_half_dimension = object_dim[AXIS_TO_INDEX[axis]] / 2 + + tool_frame_offset = RobotDescription.current_robot_description.get_distance_palm_to_tool_frame(arm)/2 + + offset_value = max(0, object_half_dimension-tool_frame_offset) + + return offset_value + + +def adjust_grasp_for_object_rotation(object, grasp, arm): + """ + Adjusts the grasp orientation based on the object's current orientation. + + This function combines the specified grasp orientation with the object's current orientation + to produce the final orientation needed for the end effector to grasp the object correctly. + + Args: + object (Object): The object to be grasped, with an orientation accessible as a quaternion list. + grasp (Enum): The specified grasp type, used to retrieve the predefined grasp orientation. + arm (Enum): The arm used for grasping, needed to access the end effector's grasp orientations. + + Returns: + list: A quaternion [x, y, z, w] representing the adjusted grasp orientation. 
+ + # TODO: currently redundant, can also call pycram.datastructures.pose.Pose.multiply_quaternions with some preparation + """ + grasp_orientation = RobotDescription.current_robot_description.get_arm_chain(arm).end_effector.grasps[grasp] + x1, y1, z1, w1 = grasp_orientation + x2, y2, z2, w2 = object.orientation_as_list() + + w = w2 * w1 - x2 * x1 - y2 * y1 - z2 * z1 + x = w2 * x1 + x2 * w1 + y2 * z1 - z2 * y1 + y = w2 * y1 - x2 * z1 + y2 * w1 + z2 * x1 + z = w2 * z1 + x2 * y1 - y2 * x1 + z2 * w1 + + return [x, y, z, w] diff --git a/src/pycram/object_descriptors/urdf.py b/src/pycram/object_descriptors/urdf.py index 75ab98a03..a165e4b41 100644 --- a/src/pycram/object_descriptors/urdf.py +++ b/src/pycram/object_descriptors/urdf.py @@ -16,7 +16,7 @@ LinkDescription as AbstractLinkDescription, ObjectDescription as AbstractObjectDescription from ..datastructures.dataclasses import Color, VisualShape, BoxVisualShape, CylinderVisualShape, \ SphereVisualShape, MeshVisualShape -from ..utils import suppress_stdout_stderr +from ..utils import load_urdf_silently, suppress_stdout_stderr class LinkDescription(AbstractLinkDescription): @@ -176,7 +176,7 @@ def load_description(self, path) -> URDF: with open(path, 'r') as file: # Since parsing URDF causes a lot of warning messages which can't be deactivated, we suppress them with suppress_stdout_stderr(): - return URDF.from_xml_string(file.read()) + return load_urdf_silently(file.read(), from_string_instead_of_file=True) def generate_from_mesh_file(self, path: str, name: str, color: Optional[Color] = Color()) -> str: """ diff --git a/src/pycram/pose_generator_and_validator.py b/src/pycram/pose_generator_and_validator.py index e34c1d6ad..5ee9a32bd 100644 --- a/src/pycram/pose_generator_and_validator.py +++ b/src/pycram/pose_generator_and_validator.py @@ -1,7 +1,9 @@ import tf import numpy as np +from .datastructures.enums import Grasp from .datastructures.world import World +from .ros.viz_marker_publisher import AxisMarkerPublisher 
from .world_concepts.world_object import Object from .world_reasoning import contact from .costmaps import Costmap @@ -10,7 +12,7 @@ from .robot_description import RobotDescription from .external_interfaces.ik import request_ik from .plan_failures import IKError -from .utils import _apply_ik +from .utils import _apply_ik, translate_relative_to_object from typing_extensions import Tuple, List, Union, Dict, Iterable @@ -32,9 +34,19 @@ class PoseGenerator: def __init__(self, costmap: Costmap, number_of_samples=100, orientation_generator=None): """ - :param costmap: The costmap from which poses should be sampled. - :param number_of_samples: The number of samples from the costmap that should be returned at max - :param orientation_generator: function that generates an orientation given a position and the origin of the costmap + Initializes a PoseGenerator for sampling poses from a given costmap. + + This class generates sampled poses within the specified costmap, using a defined number of samples + and an optional orientation generator. If no orientation generator is provided, a default generator + is used. + + Args: + costmap (Costmap): The costmap from which poses should be sampled. + number_of_samples (int, optional): Maximum number of samples to be returned from the costmap. Defaults to 100. + orientation_generator (callable, optional): Function that generates an orientation given a position and the origin of the costmap. + + Returns: + None """ if not PoseGenerator.current_orientation_generator: @@ -48,23 +60,42 @@ def __init__(self, costmap: Costmap, number_of_samples=100, orientation_generato def __iter__(self) -> Iterable: """ - A generator that crates pose candidates from a given costmap. The generator - selects the highest 100 values and returns the corresponding positions. - Orientations are calculated such that the Robot faces the center of the costmap. 
+ Generates pose candidates from a costmap, selecting the highest 100 values and + returning the corresponding positions. The orientations are calculated such that + the robot faces the center of the costmap. - :Yield: A tuple of position and orientation + Yields: + Pose: A Pose object containing position and orientation. """ - # Determines how many positions should be sampled from the costmap + np.random.seed(42) + if self.number_of_samples == -1: self.number_of_samples = self.costmap.map.flatten().shape[0] - indices = np.argpartition(self.costmap.map.flatten(), -self.number_of_samples)[-self.number_of_samples:] - indices = np.dstack(np.unravel_index(indices, self.costmap.map.shape)).reshape(self.number_of_samples, 2) + number_of_samples = min(self.number_of_samples, self.costmap.map.size) - height = self.costmap.map.shape[0] - width = self.costmap.map.shape[1] + height, width = self.costmap.map.shape center = np.array([height // 2, width // 2]) - for ind in indices: + + flat_values = self.costmap.map.flatten() + + # Filter non-zero weights and adjust number_of_samples accordingly + non_zero_indices = np.nonzero(flat_values)[0] + non_zero_weights = flat_values[non_zero_indices] + number_of_samples = min(number_of_samples, len(non_zero_indices)) + + if non_zero_weights.sum() > 0: + sampled_indices = np.random.choice(non_zero_indices, size=number_of_samples, replace=False, + p=non_zero_weights / non_zero_weights.sum()) + else: + sampled_indices = [] + + indices = np.dstack(np.unravel_index(sampled_indices, self.costmap.map.shape)).reshape(number_of_samples, 2) + + sampled_weights = flat_values[sampled_indices] + sorted_indices = indices[np.argsort(-sampled_weights)] + + for ind in sorted_indices: if self.costmap.map[ind[0]][ind[1]] == 0: continue # The position is calculated by creating a vector from the 2D position in the costmap (given by x and y) @@ -141,41 +172,57 @@ def visibility_validator(pose: Pose, def _in_contact(robot: Object, obj: Object, 
allowed_collision: Dict[Object, List[str]], allowed_robot_links: List[str]) -> bool: """ - This method checks if a given robot is in contact with a given object. - - :param robot: The robot object that should be checked for contact. - :param obj: The object that should be checked for contact with the robot. - :param allowed_collision: A dictionary that contains the allowed collisions for links of each object in the world. - :param allowed_robot_links: A list of links of the robot that are allowed to be in contact with the object. - :return: True if the robot is in contact with the object and False otherwise. + Checks if the specified robot is in contact with a given object, while accounting for + allowed collisions on specific links. + + Args: + robot (Object): The robot to check for contact. + obj (Object): The object to check for contact with the robot. + allowed_collision (Dict[Object, List[str]]): A dictionary of objects with lists of + link names allowed to collide. + allowed_robot_links (List[str]): A list of robot link names that are allowed to be in + contact with the object. + + Returns: + bool: True if the robot is in contact with the object on non-allowed links, False otherwise. """ in_contact, contact_links = contact(robot, obj, return_links=True) - allowed_links = allowed_collision[obj] if obj in allowed_collision.keys() else [] - - if in_contact: - for link in contact_links: - if link[0].name in allowed_robot_links or link[1].name in allowed_links: - in_contact = False - # TODO: in_contact is never set to True after it was set to False is that correct? 
- # TODO: If it is correct, then this loop should break after the first contact is found - return in_contact + if not in_contact: + return False + + allowed_links = allowed_collision.get(obj, []) + + for link in contact_links: + if link[0].name not in allowed_robot_links and link[1].name not in allowed_links: + return True + + return False + def reachability_validator(pose: Pose, robot: Object, target: Union[Object, Pose], - allowed_collision: Dict[Object, List] = None) -> Tuple[bool, List]: + allowed_collision: Dict[Object, List] = None, + translation_value: float = 0.1) -> Tuple[bool, List]: """ - This method validates if a target position is reachable for a pose candidate. - This is done by asking the ik solver if there is a valid solution if the - robot stands at the position of the pose candidate. if there is a solution - the validator returns True and False in any other case. - - :param pose: The pose candidate for which the reachability should be validated - :param robot: The robot object in the World for which the reachability should be validated. - :param target: The target position or object instance which should be the target for reachability. - :param allowed_collision: dict of objects with which the robot is allowed to collide each object correlates to a list of links of which this object consists - :return: True if the target is reachable for the robot and False in any other case. + Validates if a target position is reachable for a given pose candidate. + + This method uses an IK solver to determine if a valid solution exists for the robot + standing at the specified pose. If a solution is found, the validator returns `True`; + otherwise, it returns `False`. + + Args: + pose (Pose): The pose candidate for which reachability should be validated. + robot (Object): The robot object in the world for which reachability is being validated. + target (Union[Object, Pose]): The target position or object that should be reachable. 
+ allowed_collision (Dict[Object, List], optional): A dictionary of objects with which + the robot is allowed to collide, where each object maps to a list of its constituent links. + translation_value (float, optional): Offset distance used to compute the retract pose + along the palm axis. Defaults to 0.1. + + Returns: + Tuple[bool, List]: A tuple where the first element is `True` if the target is reachable + and `False` otherwise. The second element is a list of arms for which a valid IK + solution was found. """ if type(target) == Object: target = target.get_pose() @@ -189,11 +236,6 @@ res = False arms = [] for description in manipulator_descs: - retract_target_pose = LocalTransformer().transform_pose(target, robot.get_link_tf_frame(description.end_effector.tool_frame)) - retract_target_pose.position.x -= 0.07 # Care hard coded value copied from PlaceAction class - - # retract_pose needs to be in world frame? - retract_target_pose = LocalTransformer().transform_pose(retract_target_pose, "map") joints = description.joints tool_frame = description.end_effector.tool_frame @@ -210,7 +252,18 @@ # _apply_ik(robot, resp, joints) in_contact = collision_check(robot, allowed_collision) + if not in_contact: # only check for retract pose if pose worked + + palm_axis = RobotDescription.current_robot_description.get_palm_axis() + + retract_target_pose = translate_relative_to_object(target, palm_axis, translation_value) + + retract_target_pose = LocalTransformer().transform_pose(retract_target_pose, "map") + + marker = AxisMarkerPublisher() + marker.publish([pose, retract_target_pose], length=0.3) + pose, joint_states = request_ik(retract_target_pose, robot, joints, tool_frame) robot.set_pose(pose) robot.set_joint_positions(joint_states) @@ -251,5 +304,3 @@ in_contact= _in_contact(robot, obj, allowed_collision, allowed_robot_links) return in_contact - - diff --git a/src/pycram/process_modules/__init__.py 
b/src/pycram/process_modules/__init__.py index dc64d022f..78e3f196c 100644 --- a/src/pycram/process_modules/__init__.py +++ b/src/pycram/process_modules/__init__.py @@ -3,6 +3,7 @@ from .pr2_process_modules import Pr2Manager from .justin_process_modules import JustinManager from .icub_process_modules import ICubManager +from .armar_process_modules import ArmarManager #from .boxy_process_modules import BoxyManager #from .stretch_process_modules import StretchManager #from .hsrb_process_modules import HSRBManager @@ -13,6 +14,7 @@ JustinManager() ICubManager() TiagoManager() +ArmarManager() #BoxyManager() DonbotManager() #StretchManager() diff --git a/src/pycram/process_modules/armar_process_modules.py b/src/pycram/process_modules/armar_process_modules.py new file mode 100644 index 000000000..ba65c60c8 --- /dev/null +++ b/src/pycram/process_modules/armar_process_modules.py @@ -0,0 +1,7 @@ +from .default_process_modules import * + + +class ArmarManager(DefaultManager): + def __init__(self): + super().__init__() + self.robot_name = "Armar6" diff --git a/src/pycram/process_modules/default_process_modules.py b/src/pycram/process_modules/default_process_modules.py index d89fb2ba4..d83716ec7 100644 --- a/src/pycram/process_modules/default_process_modules.py +++ b/src/pycram/process_modules/default_process_modules.py @@ -3,9 +3,10 @@ from typing_extensions import Any from scipy.spatial.transform import Rotation as R -from ..datastructures.enums import JointType +from ..datastructures.enums import JointType, Grasp from ..external_interfaces.ik import request_ik from ..external_interfaces.robokudo import query_object +from ..helper import adjust_grasp_for_object_rotation from ..utils import _apply_ik from ..process_module import ProcessModule from ..external_interfaces import giskard @@ -29,24 +30,22 @@ def _execute(self, desig: MoveMotion): class DefaultMoveHead(ProcessModule): """ - This process module moves the head to look at a specific point in the world coordinate 
frame. - This point can either be a position or an object. + Moves the robot's head to look at a specified target point in the world coordinate frame. + The target can be either a position or an object. """ def _execute(self, desig: LookingMotion): target = desig.target robot = World.robot - local_transformer = LocalTransformer() - # since its usually joint1, link1, joint2, link2, to be able to catch joint1, even though we only use the - # kinematic chain from link1 to link2, we need to use the link0 as the first link, even though its technically - # not in this kinematic chain - pan_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[1] - tilt_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[2] + neck = RobotDescription.current_robot_description.get_neck() + pan_link = neck["yaw"][0] + tilt_link = neck["pitch"][0] + + pan_joint = neck["yaw"][1] + tilt_joint = neck["pitch"][1] - pan_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[0] - tilt_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[1] pose_in_pan = local_transformer.transform_pose(target, robot.get_link_tf_frame(pan_link)).position_as_list() pose_in_tilt = local_transformer.transform_pose(target, robot.get_link_tf_frame(tilt_link)).position_as_list() @@ -59,6 +58,10 @@ def _execute(self, desig: LookingMotion): new_tilt = -np.arctan2(rotation_tilt_offset[2], np.sqrt(rotation_tilt_offset[0] ** 2 + rotation_tilt_offset[1] ** 2)) + # @TODO: find a better way to handle this + if RobotDescription.current_robot_description.name in {"iCub", "tiago_dual"}: + new_tilt = -new_tilt + current_pan = robot.get_joint_position(pan_joint) current_tilt = robot.get_joint_position(tilt_joint) @@ -149,6 +152,7 @@ def _execute(self, desig: MoveJointsMotion): robot.set_joint_positions(joint_poses) return ",", "," + class DefaultWorldStateDetecting(ProcessModule): """ This process moduledetectes an object 
even if it is not in the field of view of the robot. @@ -161,18 +165,31 @@ def _execute(self, desig: WorldStateDetectingMotion): class DefaultOpen(ProcessModule): """ - Low-level implementation of opening a container in the simulation. Assumes the handle is already grasped. + Low-level implementation for opening a container in the simulation. Assumes that + the handle has already been grasped by the robot. """ def _execute(self, desig: OpeningMotion): + # TODO: Should probably be a motion, but DefaultOpenReal does not do any of these calculations, so maybe its fine? Need to think about this part_of_object = desig.object_part.world_object - container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) + if desig.object_part.name == "handle_cab3_door_top": + container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.REVOLUTE) + else: + container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) goal_pose = link_pose_for_joint_config(part_of_object, { container_joint: part_of_object.get_joint_limits(container_joint)[1] - 0.05}, desig.object_part.name) - _move_arm_tcp(goal_pose, World.robot, desig.arm) + grasp = Grasp.FRONT + adjusted_grasp = adjust_grasp_for_object_rotation(goal_pose, grasp, desig.arm) + goal_pose = goal_pose.copy() + goal_pose.set_orientation(adjusted_grasp) + + if desig.goal_location: + _navigate_to_pose(desig.goal_location.pose, World.robot) + else: + _move_arm_tcp(goal_pose, World.robot, desig.arm) desig.object_part.world_object.set_joint_position(container_joint, part_of_object.get_joint_limits( @@ -181,18 +198,30 @@ def _execute(self, desig: OpeningMotion): class DefaultClose(ProcessModule): """ - Low-level implementation that lets the robot close a grasped container, in simulation + Low-level implementation for closing a grasped container in simulation. + Assumes the robot is already holding the container's handle. 
""" def _execute(self, desig: ClosingMotion): part_of_object = desig.object_part.world_object - container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) + if desig.object_part.name == "handle_cab3_door_top": + container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.REVOLUTE) + else: + container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) goal_pose = link_pose_for_joint_config(part_of_object, { container_joint: part_of_object.get_joint_limits(container_joint)[0]}, desig.object_part.name) - _move_arm_tcp(goal_pose, World.robot, desig.arm) + grasp = Grasp.FRONT + adjusted_grasp = adjust_grasp_for_object_rotation(goal_pose, grasp, desig.arm) + goal_pose = goal_pose.copy() + goal_pose.set_orientation(adjusted_grasp) + + if desig.goal_location: + _navigate_to_pose(desig.goal_location.pose, World.robot) + else: + _move_arm_tcp(goal_pose, World.robot, desig.arm) desig.object_part.world_object.set_joint_position(container_joint, part_of_object.get_joint_limits( @@ -208,6 +237,10 @@ def _move_arm_tcp(target: Pose, robot: Object, arm: Arms) -> None: _apply_ik(robot, inv) +def _navigate_to_pose(target: Pose, robot: Object) -> None: + robot.set_pose(target) + + ########################################################### ######### Process Modules for the Real Robot ############## ########################################################### @@ -224,8 +257,8 @@ def _execute(self, designator: MoveMotion) -> Any: class DefaultMoveHeadReal(ProcessModule): """ - Process module for the real robot to move that such that it looks at the given position. Uses the same calculation - as the simulated one + Process module for controlling the real robot's head to look at a specified position. + Uses the same calculations as the simulated version to orient the head. 
""" def _execute(self, desig: LookingMotion): @@ -234,14 +267,13 @@ def _execute(self, desig: LookingMotion): local_transformer = LocalTransformer() - # since its usually joint1, link1, joint2, link2, to be able to catch joint1, even though we only use the - # kinematic chain from link1 to link2, we need to use the link0 as the first link, even though its technically - # not in this kinematic chain - pan_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[1] - tilt_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[2] + neck = RobotDescription.current_robot_description.get_neck() + pan_link = neck["yaw"][0] + tilt_link = neck["pitch"][0] + + pan_joint = neck["yaw"][1] + tilt_joint = neck["pitch"][1] - pan_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[0] - tilt_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[1] pose_in_pan = local_transformer.transform_pose(target, robot.get_link_tf_frame(pan_link)).position_as_list() pose_in_tilt = local_transformer.transform_pose(target, robot.get_link_tf_frame(tilt_link)).position_as_list() @@ -254,9 +286,14 @@ def _execute(self, desig: LookingMotion): new_tilt = -np.arctan2(rotation_tilt_offset[2], np.sqrt(rotation_tilt_offset[0] ** 2 + rotation_tilt_offset[1] ** 2)) + # @TODO: find a better way to handle this + if RobotDescription.current_robot_description.name in {"iCub", "tiago_dual"}: + new_tilt = -new_tilt + current_pan = robot.get_joint_position(pan_joint) current_tilt = robot.get_joint_position(tilt_joint) + # TODO: Not sure that this is enough to move head in real world robot.set_joint_position(pan_joint, new_pan + current_pan) robot.set_joint_position(tilt_joint, new_tilt + current_tilt) @@ -371,59 +408,59 @@ def __init__(self): super().__init__("default") def navigate(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == 
ExecutionType.SIMULATED: return DefaultNavigation(self._navigate_lock) elif ProcessModuleManager.execution_type == "real": return DefaultNavigationReal(self._navigate_lock) def looking(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultMoveHead(self._looking_lock) elif ProcessModuleManager.execution_type == "real": return DefaultMoveHeadReal(self._looking_lock) def detecting(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultDetecting(self._detecting_lock) elif ProcessModuleManager.execution_type == "real": return DefaultDetectingReal(self._detecting_lock) def move_tcp(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultMoveTCP(self._move_tcp_lock) elif ProcessModuleManager.execution_type == "real": return DefaultMoveTCPReal(self._move_tcp_lock) def move_arm_joints(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultMoveArmJoints(self._move_arm_joints_lock) elif ProcessModuleManager.execution_type == "real": return DefaultMoveArmJointsReal(self._move_arm_joints_lock) def world_state_detecting(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultWorldStateDetecting(self._world_state_detecting_lock) def move_joints(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultMoveJoints(self._move_joints_lock) elif ProcessModuleManager.execution_type == "real": return DefaultMoveJointsReal(self._move_joints_lock) def move_gripper(self): - if 
ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultMoveGripper(self._move_gripper_lock) elif ProcessModuleManager.execution_type == "real": return DefaultMoveGripperReal(self._move_gripper_lock) def open(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultOpen(self._open_lock) elif ProcessModuleManager.execution_type == "real": return DefaultOpenReal(self._open_lock) def close(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: + if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: return DefaultClose(self._close_lock) elif ProcessModuleManager.execution_type == "real": return DefaultCloseReal(self._close_lock) diff --git a/src/pycram/process_modules/donbot_process_modules.py b/src/pycram/process_modules/donbot_process_modules.py index 67c6911cb..9d91d9be0 100644 --- a/src/pycram/process_modules/donbot_process_modules.py +++ b/src/pycram/process_modules/donbot_process_modules.py @@ -1,38 +1,40 @@ from .default_process_modules import * +from ..utils import _apply_ik +from .default_process_modules import _move_arm_tcp -class DonbotMoveHead(ProcessModule): +class DonbotMoveHead(DefaultMoveHead): """ - This process module moves the head to look at a specific point in the world coordinate frame. - This point can either be a position or an object. + Moves the head of the iai_donbot robot to look at a specified point in the world coordinate frame. + This point can be a position or an object, and the orientation is calculated based on the + robot's base and camera alignment. 
""" - def _execute(self, desig): target = desig.target robot = World.robot - local_transformer = LocalTransformer() + base_frame_pose: Pose = robot.get_link_pose("ur5_base_link").copy() + base_frame_pose.position.z += 0.4 + base_position = np.array(base_frame_pose.position_as_list()) + + target_position = np.array(target.position_as_list()) + + look_vector = target_position - base_position + z_axis = look_vector / np.linalg.norm(look_vector) - pose_in_shoulder = local_transformer.transform_pose(target, robot.get_link_tf_frame("ur5_shoulder_link")) + up = -np.array(RobotDescription.current_robot_description.cameras["camera_link"].front_facing_axis) - if pose_in_shoulder.position.x >= 0 and pose_in_shoulder.position.x >= abs(pose_in_shoulder.position.y): - robot.set_joint_positions( - RobotDescription.current_robot_description.get_static_joint_chain("left_arm", "front")) - if pose_in_shoulder.position.y >= 0 and pose_in_shoulder.position.y >= abs(pose_in_shoulder.position.x): - robot.set_joint_positions( - RobotDescription.current_robot_description.get_static_joint_chain("left_arm", "arm_right")) - if pose_in_shoulder.position.x <= 0 and abs(pose_in_shoulder.position.x) > abs(pose_in_shoulder.position.y): - robot.set_joint_positions( - RobotDescription.current_robot_description.get_static_joint_chain("left_arm", "back")) - if pose_in_shoulder.position.y <= 0 and abs(pose_in_shoulder.position.y) > abs(pose_in_shoulder.position.x): - robot.set_joint_positions( - RobotDescription.current_robot_description.get_static_joint_chain("left_arm", "arm_left")) + x_axis = np.cross(up, z_axis) + x_axis /= np.linalg.norm(x_axis) - pose_in_shoulder = local_transformer.transform_pose(target, robot.get_link_tf_frame("ur5_shoulder_link")) + y_axis = np.cross(z_axis, x_axis) + rotation_matrix = np.array([x_axis, y_axis, z_axis]).T - new_pan = np.arctan2(pose_in_shoulder.position.y, pose_in_shoulder.position.x) + orientation_quat = R.from_matrix(rotation_matrix).as_quat() + 
adjusted_pose = Pose(base_position.tolist(), orientation_quat.tolist()) + _move_arm_tcp(adjusted_pose, robot, Arms.LEFT) - robot.set_joint_position("ur5_shoulder_pan_joint", new_pan + robot.get_joint_position("ur5_shoulder_pan_joint")) +# TODO: Also need to do DonbotMoveHeadReal class DonbotManager(DefaultManager): diff --git a/src/pycram/process_modules/tiago_process_modules.py b/src/pycram/process_modules/tiago_process_modules.py index eb266dc88..92dad808e 100644 --- a/src/pycram/process_modules/tiago_process_modules.py +++ b/src/pycram/process_modules/tiago_process_modules.py @@ -1,108 +1,7 @@ from .default_process_modules import * -from .default_process_modules import _move_arm_tcp -from ..datastructures.enums import Grasp - - -class TiagoMoveHead(DefaultMoveHead): - """ - This process module moves the head to look at a specific point in the world coordinate frame. - This point can either be a position or an object. - """ - - def _execute(self, desig: LookingMotion): - target = desig.target - robot = World.robot - - local_transformer = LocalTransformer() - - # since its usually joint1, link1, joint2, link2, to be able to catch joint1, even though we only use the - # kinematic chain from link1 to link2, we need to use the link0 as the first link, even though its technically - # not in this kinematic chain - pan_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[1] - tilt_link = RobotDescription.current_robot_description.kinematic_chains["neck"].links[2] - - pan_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[0] - tilt_joint = RobotDescription.current_robot_description.kinematic_chains["neck"].joints[1] - pose_in_pan = local_transformer.transform_pose(target, robot.get_link_tf_frame(pan_link)).position_as_list() - pose_in_tilt = local_transformer.transform_pose(target, robot.get_link_tf_frame(tilt_link)).position_as_list() - - new_pan = np.arctan2(pose_in_pan[1], pose_in_pan[0]) - - tilt_offset = 
RobotDescription.current_robot_description.get_offset(tilt_joint) - tilt_offset_rotation = tilt_offset.rotation if tilt_offset else [0, 0, 0] - rotation_tilt_offset = R.from_euler('xyz', tilt_offset_rotation).apply(pose_in_tilt) - - new_tilt = np.arctan2(rotation_tilt_offset[2], - np.sqrt(rotation_tilt_offset[0] ** 2 + rotation_tilt_offset[1] ** 2)) - - current_pan = robot.get_joint_position(pan_joint) - current_tilt = robot.get_joint_position(tilt_joint) - - robot.set_joint_position(pan_joint, new_pan + current_pan) - robot.set_joint_position(tilt_joint, new_tilt + current_tilt) - - -class TiagoOpen(DefaultOpen): - """ - Low-level implementation of opening a container in the simulation. Assumes the handle is already grasped. - """ - - def _execute(self, desig: OpeningMotion): - part_of_object = desig.object_part.world_object - - container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) - - goal_pose = link_pose_for_joint_config(part_of_object, { - container_joint: part_of_object.get_joint_limits(container_joint)[1] - 0.05}, desig.object_part.name) - - goal_pose.set_orientation([0, 0, 0, 1]) - grasp = RobotDescription.current_robot_description.get_arm_chain(Arms.RIGHT).end_effector.grasps[Grasp.FRONT] - goal_pose.multiply_quaternions(grasp) - - _move_arm_tcp(goal_pose, World.robot, desig.arm) - - desig.object_part.world_object.set_joint_position(container_joint, - part_of_object.get_joint_limits( - container_joint)[1]) - - -class TiagoClose(DefaultClose): - """ - Low-level implementation that lets the robot close a grasped container, in simulation - """ - - def _execute(self, desig: ClosingMotion): - part_of_object = desig.object_part.world_object - - container_joint = part_of_object.find_joint_above_link(desig.object_part.name, JointType.PRISMATIC) - - goal_pose = link_pose_for_joint_config(part_of_object, { - container_joint: part_of_object.get_joint_limits(container_joint)[0]}, desig.object_part.name) - - 
goal_pose.set_orientation([0, 0, 0, 1]) - grasp = RobotDescription.current_robot_description.get_arm_chain(Arms.RIGHT).end_effector.grasps[Grasp.FRONT] - goal_pose.multiply_quaternions(grasp) - - _move_arm_tcp(goal_pose, World.robot, desig.arm) - - desig.object_part.world_object.set_joint_position(container_joint, - part_of_object.get_joint_limits( - container_joint)[0]) class TiagoManager(DefaultManager): def __init__(self): super().__init__() self.robot_name = "tiago_dual" - - def looking(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: - return TiagoMoveHead(self._looking_lock) - - def open(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: - return TiagoOpen(self._open_lock) - - def close(self): - if ProcessModuleManager.execution_type == ExecutionType.SIMULATED: - return TiagoClose(self._close_lock) \ No newline at end of file diff --git a/src/pycram/robot_description.py b/src/pycram/robot_description.py index 2734b1ff5..2e4485fc7 100644 --- a/src/pycram/robot_description.py +++ b/src/pycram/robot_description.py @@ -3,11 +3,12 @@ from enum import Enum +import numpy as np import rospy -from typing_extensions import List, Dict, Union, Optional +from typing_extensions import List, Dict, Union, Optional, Tuple from urdf_parser_py.urdf import URDF -from .utils import suppress_stdout_stderr +from .utils import load_urdf_silently, suppress_stdout_stderr from .datastructures.enums import Arms, Grasp, GripperState, GripperType @@ -103,6 +104,10 @@ class RobotDescription: """ All joints defined in the URDF, by default fixed joints are not included """ + neck: Dict[str, List[str]] + """ + Dictionary of neck links and joints + """ def __init__(self, name: str, base_link: str, torso_link: str, torso_joint: str, urdf_path: str): """ @@ -120,13 +125,16 @@ def __init__(self, name: str, base_link: str, torso_link: str, torso_joint: str, self.torso_link = torso_link self.torso_joint = torso_joint with suppress_stdout_stderr(): 
- # Since parsing URDF causes a lot of warning messages which can't be deactivated, we suppress them - self.urdf_object = URDF.from_xml_file(urdf_path) + self.urdf_object = load_urdf_silently(urdf_path) self.kinematic_chains: Dict[str, KinematicChainDescription] = {} self.cameras: Dict[str, CameraDescription] = {} self.links: List[str] = [l.name for l in self.urdf_object.links] self.joints: List[str] = [j.name for j in self.urdf_object.joints] self.costmap_offset: float = 0.3 + self.max_reach = None + self.palm_axis = [0, 0, 1] + self.distance_palm_to_tool_frame_left = None + self.distance_palm_to_tool_frame_right = None def add_kinematic_chain_description(self, chain: KinematicChainDescription): """ @@ -322,6 +330,204 @@ def get_torso_joint(self) -> str: """ return self.torso_joint + def get_tool_frame_joint(self, arm: Arms) -> str: + """ + Retrieves the name of the tool frame joint for the specified arm. + + Args: + arm (Arms): The arm for which to retrieve the tool frame joint. + + Returns: + str: The name of the tool frame joint. + """ + chain = self.get_arm_chain(arm) + tool_frame = chain.end_effector.tool_frame + return self.get_parent(tool_frame) + + def set_distance_palm_to_tool_frame(self, arm: Arms, distance: float): + """ + Sets the distance between the palm and the tool frame for the specified arm. + + Args: + arm (Arms): The arm for which to set the distance. + distance (float): The distance to set between the palm and the tool frame. + """ + if arm == Arms.LEFT: + self.distance_palm_to_tool_frame_left = distance + elif arm == Arms.RIGHT: + self.distance_palm_to_tool_frame_right = distance + + def get_distance_palm_to_tool_frame(self, arm: Arms) -> float: + """ + Retrieves the distance between the palm and the tool frame for the specified arm. + + If the distance is not yet set, it calculates the distance based on the robot's + arm chain configuration and caches the result for future use. 
+ + Args: + arm (Arms): The arm for which to retrieve the palm-to-tool-frame distance. + + Returns: + float: The distance between the palm and the tool frame. + """ + if arm == Arms.LEFT: + if not self.distance_palm_to_tool_frame_left: + chain = self.get_arm_chain(arm) + palm_link = chain.end_effector.start_link + tool_frame = chain.end_effector.tool_frame + distance = self.get_distance_between_links(palm_link, tool_frame) + return distance + else: + return self.distance_palm_to_tool_frame_left + + if arm == Arms.RIGHT: + if not self.distance_palm_to_tool_frame_right: + chain = self.get_arm_chain(arm) + palm_link = chain.end_effector.start_link + tool_frame = chain.end_effector.tool_frame + distance = self.get_distance_between_links(palm_link, tool_frame) + return distance + else: + return self.distance_palm_to_tool_frame_right + + def get_distance_between_links(self, link1: str, link2: str) -> float: + """ + Calculates the distance between two links in the URDF by summing the lengths + of all intermediate links in the kinematic chain between them. + + Args: + link1 (str): The name of the first link. + link2 (str): The name of the second link. + + Returns: + float: The total distance between the two links. + """ + robot = self.urdf_object + distance = 0.0 + + kinematic_chain = robot.get_chain(link1, link2, joints=False) + joints = robot.get_chain(link1, link2, links=False) + + for joint in joints: + if self.get_parent(joint) in kinematic_chain and self.get_child(joint) in kinematic_chain: + translation = robot.joint_map[joint].origin.position + link_length = np.linalg.norm(translation) + distance += link_length + + return distance + + def set_max_reach(self, start_link: str, end_link: str, factor: float = 0.65): + """ + Calculates and sets the maximum reach of the robot by summing the lengths of + links in the kinematic chain between the specified start and end links. The + calculated reach is scaled by a given factor. 
+ + Args: + start_link (str): The starting link in the kinematic chain. + end_link (str): The ending link in the kinematic chain. + factor (float): A scaling factor to adjust the calculated maximum reach. + Defaults to 0.65. + """ + robot = self.urdf_object + max_reach = 0.0 + + kinematic_chain = robot.get_chain(start_link, end_link, joints=False) + joints = robot.get_chain(start_link, end_link, links=False) + + for joint in joints: + if self.get_parent(joint) in kinematic_chain and self.get_child( + joint) in kinematic_chain: + translation = robot.joint_map[joint].origin.position + link_length = np.linalg.norm(translation) + max_reach += link_length + + self.max_reach = max_reach * factor + + def get_max_reach(self) -> float: + """ + Retrieves the maximum reach of the robot. If it has not been set, calculates + the reach based on the primary manipulator chain and sets it. + + Returns: + float: The maximum reach of the robot. + """ + if not self.max_reach: + manip = self.get_manipulator_chains()[0] + self.set_max_reach(manip.start_link, manip.end_effector.tool_frame) + + return self.max_reach + + def get_joint_limits(self, joint_name: str) -> Optional[Tuple[float, float]]: + """ + Retrieves the joint limits for a specified joint in the URDF. + + If the joint is not found or does not have defined limits, logs an error + and returns `None`. + + Args: + joint_name (str): The name of the joint for which to retrieve limits. + + Returns: + Optional[Tuple[float, float]]: A tuple containing the lower and upper joint limits, + or `None` if the joint is not found or has no limits. 
+ """ + if joint_name not in self.urdf_object.joint_map: + rospy.logerr(f"Joint {joint_name} not found in URDF") + return None + joint = self.urdf_object.joint_map[joint_name] + if joint.limit: + return joint.limit.lower, joint.limit.upper + else: + return None + + def set_neck(self, yaw_joint: Optional[str] = None, pitch_joint: Optional[str] = None, + roll_joint: Optional[str] = None): + """ + Defines the neck configuration of the robot by setting the yaw, pitch, and roll + joints along with their corresponding links. + + Args: + yaw_joint (Optional[str]): The name of the yaw joint. Defaults to None. + pitch_joint (Optional[str]): The name of the pitch joint. Defaults to None. + roll_joint (Optional[str]): The name of the roll joint. Defaults to None. + """ + yaw_link = self.get_child(yaw_joint) if yaw_joint else None + pitch_link = self.get_child(pitch_joint) if pitch_joint else None + roll_link = self.get_child(roll_joint) if roll_joint else None + self.neck = { + "yaw": [yaw_link, yaw_joint], + "pitch": [pitch_link, pitch_joint], + "roll": [roll_link, roll_joint] + } + + def get_neck(self) -> Dict[str, List[Optional[str]]]: + """ + Retrieves the neck configuration of the robot, including links and joints for yaw, + pitch, and roll. + + Returns: + Dict[str, List[Optional[str]]]: A dictionary containing neck links and joints. + """ + return self.neck + + def set_palm_axis(self, axis: List[float]): + """ + Sets the direction axis for the robot's palm. + + Args: + axis (List[float]): A list representing the direction of the palm axis. + """ + self.palm_axis = axis + + def get_palm_axis(self) -> List[float]: + """ + Retrieves the direction axis of the robot's palm. + + Returns: + List[float]: The current direction of the palm axis. 
+ """ + return self.palm_axis + class KinematicChainDescription: """ @@ -665,6 +871,44 @@ def add_grasp_orientations(self, orientations: Dict[Grasp, List[float]]): """ self.grasps.update(orientations) + def generate_all_grasp_orientations_from_front_grasp(self, front_orientation: List[float]): + """ + Generates all grasp orientations based on a given front-facing orientation. + + This method calculates orientations for six grasp directions (front, back, left, right, + top, and bottom) relative to a specified front-facing orientation. Each orientation + is computed by applying a quaternion multiplication between the front orientation and + predefined relative rotations. + + Args: + front_orientation (List[float]): A quaternion representing the front-facing orientation + as [x, y, z, w]. + """ + relative_rotations = { + Grasp.FRONT: [0, 0, 0, 1], + Grasp.BACK: [0, 0, 1, 0], + Grasp.LEFT: [0, 0, -0.707, 0.707], + Grasp.RIGHT: [0, 0, 0.707, 0.707], + Grasp.TOP: [0, 0.707, 0, 0.707], + Grasp.BOTTOM: [0, -0.707, 0, 0.707] + } + + all_orientations = {} + + for grasp, relative_rotation in relative_rotations.items(): + x1, y1, z1, w1 = front_orientation + x2, y2, z2, w2 = relative_rotation + + grasp_orientation_x = w2 * x1 + x2 * w1 + y2 * z1 - z2 * y1 + grasp_orientation_y = w2 * y1 - x2 * z1 + y2 * w1 + z2 * x1 + grasp_orientation_z = w2 * z1 + x2 * y1 - y2 * x1 + z2 * w1 + grasp_orientation_w = w2 * w1 - x2 * x1 - y2 * y1 - z2 * z1 + + all_orientations[grasp] = [grasp_orientation_x, grasp_orientation_y, + grasp_orientation_z, grasp_orientation_w] + + self.grasps = all_orientations + @property def links(self) -> List[str]: """ diff --git a/src/pycram/robot_descriptions/armar_description.py b/src/pycram/robot_descriptions/armar_description.py new file mode 100644 index 000000000..b87c06a04 --- /dev/null +++ b/src/pycram/robot_descriptions/armar_description.py @@ -0,0 +1,150 @@ +from ..robot_description import RobotDescription, KinematicChainDescription, 
EndEffectorDescription, \ + RobotDescriptionManager, CameraDescription +from ..datastructures.enums import Arms, Grasp, GripperState, GripperType, TorsoState +import rospkg + +rospack = rospkg.RosPack() +filename = rospack.get_path('pycram') + '/resources/robots/' + "Armar6" + '.urdf' + +armar_description = RobotDescription("Armar6", "world", "torso", "torso_joint", + filename) + +################################## Left Arm ################################## +left_arm = KinematicChainDescription("left", "world", "arm_t8_r0", + armar_description.urdf_object, arm_type=Arms.LEFT) + +left_arm.add_static_joint_states("park", {"torso_joint": -0.15, + "arm_t12_joint_r0": 0, + "arm_t23_joint_r0": 0, + "arm_t34_joint_r0": 1.5, + "arm_t45_joint_r0": 0.5, + "arm_t56_joint_r0": 2.0, + "arm_t67_joint_r0": 1.5, + "arm_t78_joint_r0": 0, + "arm_t8_joint_r0": 0}) + +armar_description.add_kinematic_chain_description(left_arm) + +################################## Left Gripper ################################## +left_gripper = EndEffectorDescription("left_gripper", "arm_t8_r0", "left_tool_frame", + armar_description.urdf_object) +left_gripper.add_static_joint_states(GripperState.OPEN, {"Thumb L 1 Joint": 0.0, + "Thumb L 2 Joint": 0.0, + "Index L 1 Joint": 0.0, + "Index L 2 Joint": 0.0, + "Index L 3 Joint": 0.0, + "Middle L 1 Joint": 0.0, + "Middle L 2 Joint": 0.0, + "Middle L 3 Joint": 0.0, + "Ring L 1 Joint": 0.0, + "Ring L 2 Joint": 0.0, + "Ring L 3 Joint": 0.0, + "Pinky L 1 Joint": 0.0, + "Pinky L 2 Joint": 0.0, + "Pinky L 3 Joint": 0.0}) +left_gripper.add_static_joint_states(GripperState.CLOSE, {"Thumb L 1 Joint": 1.57, + "Thumb L 2 Joint": 1.57, + "Index L 1 Joint": 1.57, + "Index L 2 Joint": 1.57, + "Index L 3 Joint": 1.57, + "Middle L 1 Joint": 1.57, + "Middle L 2 Joint": 1.57, + "Middle L 3 Joint": 1.57, + "Ring L 1 Joint": 1.57, + "Ring L 2 Joint": 1.57, + "Ring L 3 Joint": 1.57, + "Pinky L 1 Joint": 1.57, + "Pinky L 2 Joint": 1.57, + "Pinky L 3 Joint": 1.57}) + 
+left_gripper.end_effector_type = GripperType.FINGER +# left_gripper.opening_distance = 0.548 +left_arm.end_effector = left_gripper + +################################## Right Arm ################################## +right_arm = KinematicChainDescription("right", "world", "arm_t8_r1", + armar_description.urdf_object, arm_type=Arms.RIGHT) + +right_arm.add_static_joint_states("park", {"torso_joint": -0.15, + "arm_t12_joint_r1": 0, + "arm_t23_joint_r1": 0, + "arm_t34_joint_r1": 1.5, + "arm_t45_joint_r1": 2.64, + "arm_t56_joint_r1": 2.0, + "arm_t67_joint_r1": 1.6415, + "arm_t78_joint_r1": 0, + "arm_t8_joint_r1": 0}) + +armar_description.add_kinematic_chain_description(right_arm) + +################################## Right Gripper ################################## +right_gripper = EndEffectorDescription("right_gripper", "arm_t8_r1", "right_tool_frame", + armar_description.urdf_object) +right_gripper.add_static_joint_states(GripperState.OPEN, {"Thumb R 1 Joint": 0.0, + "Thumb R 2 Joint": 0.0, + "Index R 1 Joint": 0.0, + "Index R 2 Joint": 0.0, + "Index R 3 Joint": 0.0, + "Middle R 1 Joint": 0.0, + "Middle R 2 Joint": 0.0, + "Middle R 3 Joint": 0.0, + "Ring R 1 Joint": 0.0, + "Ring R 2 Joint": 0.0, + "Ring R 3 Joint": 0.0, + "Pinky R 1 Joint": 0.0, + "Pinky R 2 Joint": 0.0, + "Pinky R 3 Joint": 0.0}) +right_gripper.add_static_joint_states(GripperState.CLOSE, {"Thumb R 1 Joint": 1.57, + "Thumb R 2 Joint": 1.57, + "Index R 1 Joint": 1.57, + "Index R 2 Joint": 1.57, + "Index R 3 Joint": 1.57, + "Middle R 1 Joint": 1.57, + "Middle R 2 Joint": 1.57, + "Middle R 3 Joint": 1.57, + "Ring R 1 Joint": 1.57, + "Ring R 2 Joint": 1.57, + "Ring R 3 Joint": 1.57, + "Pinky R 1 Joint": 1.57, + "Pinky R 2 Joint": 1.57, + "Pinky R 3 Joint": 1.57}) + +right_gripper.end_effector_type = GripperType.FINGER +# right_gripper.opening_distance = 0.548 +right_arm.end_effector = right_gripper + +################################## Torso ################################## +torso = 
KinematicChainDescription("torso", "world", "torso", + armar_description.urdf_object) + +torso.add_static_joint_states(TorsoState.HIGH, {"torso_joint": 0.0}) + +torso.add_static_joint_states(TorsoState.MID, {"torso_joint": -0.15}) + +torso.add_static_joint_states(TorsoState.LOW, {"torso_joint": -0.365}) + +armar_description.add_kinematic_chain_description(torso) + +################################## Camera ################################## +camera = CameraDescription("Roboception", "Roboception", 1.8, + 2.1, 0.99483, 0.75049, + [0, 0, 1]) +armar_description.add_camera_description(camera) + +################################## Neck ################################## +armar_description.add_kinematic_chain("neck", "lower_neck", "upper_neck") +armar_description.set_neck("neck_1_yaw", "neck_2_pitch") + + +################################# Grasps ################################## +left_gripper.generate_all_grasp_orientations_from_front_grasp([0.707, 0.707, 0.707, 0.707]) +right_gripper.generate_all_grasp_orientations_from_front_grasp([0.707, 0.707, 0.707, 0.707]) + +################################# Additionals ################################## +armar_description.set_costmap_offset(0.3) +armar_description.set_max_reach("torso", "left_tool_frame") +armar_description.set_palm_axis([0, 0, 1]) + +# Add to RobotDescriptionManager +rdm = RobotDescriptionManager() +rdm.register_description(armar_description) diff --git a/src/pycram/robot_descriptions/donbot_description.py b/src/pycram/robot_descriptions/donbot_description.py index 2fda85b58..7ce054951 100644 --- a/src/pycram/robot_descriptions/donbot_description.py +++ b/src/pycram/robot_descriptions/donbot_description.py @@ -6,7 +6,7 @@ rospack = rospkg.RosPack() filename = rospack.get_path('pycram') + '/resources/robots/' + "iai_donbot" + '.urdf' -donbot_description = RobotDescription("iai_donbot", "base_link", "ur5_base_link", "arm_base_mounting_joint", +donbot_description = RobotDescription("iai_donbot", "base_footprint", 
"ur5_base_link", "arm_base_mounting_joint", filename) ################################## Right Arm ################################## @@ -35,7 +35,7 @@ left_arm.end_effector = left_gripper ################################## Torso ################################## -torso = KinematicChainDescription("torso", "base_link", "ur5_base_link", +torso = KinematicChainDescription("torso", "base_footprint", "ur5_base_link", donbot_description.urdf_object, include_fixed_joints=True) # fixed joint, so all states set to 0 @@ -56,6 +56,7 @@ ################################## Neck ################################## neck = KinematicChainDescription("neck", "ur5_base_link", "ur5_wrist_3_link", donbot_description.urdf_object) +# TODO: may be able to remove this already neck.add_static_joint_states("down", {'ur5_shoulder_pan_joint': 4.130944728851318, 'ur5_shoulder_lift_joint': 0.04936718940734863, 'ur5_elbow_joint': -1.9734209219561976, @@ -112,11 +113,11 @@ 'ur5_wrist_2_joint': -1.5, 'ur5_wrist_3_joint': 1.5}) +donbot_description.set_neck("ur5_wrist_2_joint", "ur5_wrist_1_joint") + + ################################# Grasps ################################## -left_gripper.add_grasp_orientations({Grasp.FRONT: [0.707, -0.707, 0.707, -0.707], - Grasp.LEFT: [0, 0.707, -0.707, 0], - Grasp.RIGHT: [0, 0, 1, 1], - Grasp.TOP: [0.707, -0.707, 0, 0.0]}) +left_gripper.generate_all_grasp_orientations_from_front_grasp([0.707, -0.707, 0.707, -0.707]) # Add to RobotDescriptionManager rdm = RobotDescriptionManager() diff --git a/src/pycram/robot_descriptions/hsrb_description.py b/src/pycram/robot_descriptions/hsrb_description.py index 6dc4cad0f..76d2108b9 100644 --- a/src/pycram/robot_descriptions/hsrb_description.py +++ b/src/pycram/robot_descriptions/hsrb_description.py @@ -50,6 +50,8 @@ ################################## Neck ################################## neck = KinematicChainDescription("neck", "head_pan_link", "head_tilt_link", hsrb_description.urdf_object) 
+hsrb_description.set_neck("head_pan_joint", "head_tilt_joint") + ################################## Torso ################################## torso = KinematicChainDescription("torso", "base_link", "torso_lift_link", @@ -71,6 +73,11 @@ hsrb_description.add_kinematic_chain_description(neck) +############################## Additionals ################################ +# hsrb_description.set_costmap_offset(0.3) # set to correct offset if needed +# hsrb_description.set_palm_axis([0, 0, -1]) # set to correct axis if needed +# hsrb_description.set_max_reach("torso_lift_link", "gripper_left_tool_link") # set to correct links if needed + # Add to RobotDescriptionManager rdm = RobotDescriptionManager() rdm.register_description(hsrb_description) diff --git a/src/pycram/robot_descriptions/icub_description.py b/src/pycram/robot_descriptions/icub_description.py index 2bff17be4..ca193e1a0 100644 --- a/src/pycram/robot_descriptions/icub_description.py +++ b/src/pycram/robot_descriptions/icub_description.py @@ -49,26 +49,26 @@ "l_hand_little_1_joint": 0.0, "l_hand_little_2_joint": 0.0, "l_hand_little_3_joint": 0.0}) -left_gripper.add_static_joint_states(GripperState.CLOSE, {"l_hand_thumb_0_joint": 0.0, - "l_hand_thumb_1_joint": 0.0, - "l_hand_thumb_2_joint": 0.0, - "l_hand_thumb_3_joint": 0.0, - "l_hand_index_0_joint": 0.0, - "l_hand_index_1_joint": 0.0, - "l_hand_index_2_joint": 0.0, - "l_hand_index_3_joint": 0.0, - "l_hand_middle_0_joint": 0.0, - "l_hand_middle_1_joint": 0.0, - "l_hand_middle_2_joint": 0.0, - "l_hand_middle_3_joint": 0.0, - "l_hand_ring_0_joint": 0.0, - "l_hand_ring_1_joint": 0.0, - "l_hand_ring_2_joint": 0.0, - "l_hand_ring_3_joint": 0.0, - "l_hand_little_0_joint": 0.0, - "l_hand_little_1_joint": 0.0, - "l_hand_little_2_joint": 0.0, - "l_hand_little_3_joint": 0.0}) +left_gripper.add_static_joint_states(GripperState.CLOSE, {"l_hand_thumb_0_joint": 1.5707963267948966, + "l_hand_thumb_1_joint": 1.5707963267948966, + "l_hand_thumb_2_joint": 1.5707963267948966, + 
"l_hand_thumb_3_joint": 1.5707963267948966, + "l_hand_index_0_joint": -0.3490658503988659, + "l_hand_index_1_joint": 1.5707963267948966, + "l_hand_index_2_joint": 1.5707963267948966, + "l_hand_index_3_joint": 1.5707963267948966, + "l_hand_middle_0_joint": 0.3490658503988659, + "l_hand_middle_1_joint": 1.5707963267948966, + "l_hand_middle_2_joint": 1.5707963267948966, + "l_hand_middle_3_joint": 1.5707963267948966, + "l_hand_ring_0_joint": 0.3490658503988659, + "l_hand_ring_1_joint": 1.5707963267948966, + "l_hand_ring_2_joint": 1.5707963267948966, + "l_hand_ring_3_joint": 1.5707963267948966, + "l_hand_little_0_joint": 0.3490658503988659, + "l_hand_little_1_joint": 1.5707963267948966, + "l_hand_little_2_joint": 1.5707963267948966, + "l_hand_little_3_joint": 1.5707963267948966}) left_gripper.end_effector_type = GripperType.FINGER # left_gripper.opening_distance = 0.548 @@ -114,26 +114,26 @@ "r_hand_little_1_joint": 0.0, "r_hand_little_2_joint": 0.0, "r_hand_little_3_joint": 0.0}) -right_gripper.add_static_joint_states(GripperState.CLOSE, {"r_hand_thumb_0_joint": 0.0, - "r_hand_thumb_1_joint": 0.0, - "r_hand_thumb_2_joint": 0.0, - "r_hand_thumb_3_joint": 0.0, - "r_hand_index_0_joint": 0.0, - "r_hand_index_1_joint": 0.0, - "r_hand_index_2_joint": 0.0, - "r_hand_index_3_joint": 0.0, - "r_hand_middle_0_joint": 0.0, - "r_hand_middle_1_joint": 0.0, - "r_hand_middle_2_joint": 0.0, - "r_hand_middle_3_joint": 0.0, - "r_hand_ring_0_joint": 0.0, - "r_hand_ring_1_joint": 0.0, - "r_hand_ring_2_joint": 0.0, - "r_hand_ring_3_joint": 0.0, - "r_hand_little_0_joint": 0.0, - "r_hand_little_1_joint": 0.0, - "r_hand_little_2_joint": 0.0, - "r_hand_little_3_joint": 0.0}) +right_gripper.add_static_joint_states(GripperState.CLOSE, {"r_hand_thumb_0_joint": 1.5707963267948966, + "r_hand_thumb_1_joint": 1.5707963267948966, + "r_hand_thumb_2_joint": 1.5707963267948966, + "r_hand_thumb_3_joint": 1.5707963267948966, + "r_hand_index_0_joint": -0.3490658503988659, + "r_hand_index_1_joint": 
1.5707963267948966, + "r_hand_index_2_joint": 1.5707963267948966, + "r_hand_index_3_joint": 1.5707963267948966, + "r_hand_middle_0_joint": 0.3490658503988659, + "r_hand_middle_1_joint": 1.5707963267948966, + "r_hand_middle_2_joint": 1.5707963267948966, + "r_hand_middle_3_joint": 1.5707963267948966, + "r_hand_ring_0_joint": 0.3490658503988659, + "r_hand_ring_1_joint": 1.5707963267948966, + "r_hand_ring_2_joint": 1.5707963267948966, + "r_hand_ring_3_joint": 1.5707963267948966, + "r_hand_little_0_joint": 0.3490658503988659, + "r_hand_little_1_joint": 1.5707963267948966, + "r_hand_little_2_joint": 1.5707963267948966, + "r_hand_little_3_joint": 1.5707963267948966}) right_gripper.end_effector_type = GripperType.FINGER # right_gripper.opening_distance = 0.548 @@ -161,14 +161,14 @@ ################################## Neck ################################## icub_description.add_kinematic_chain("neck", "chest", "head") +icub_description.set_neck("neck_yaw", "neck_pitch", "neck_roll") ################################# Grasps ################################## -grasps = {Grasp.FRONT: [0.707, 0.707, 0.707, 0.707], - Grasp.LEFT: [1, 0, 0, 1], - Grasp.RIGHT: [0, 1, 1, 0], - Grasp.TOP: [1, 1, 0, 0]} -right_gripper.add_grasp_orientations(grasps) -left_gripper.add_grasp_orientations(grasps) +right_gripper.generate_all_grasp_orientations_from_front_grasp([0.5, 0.5, 0.5, 0.5]) +# right_gripper.add_grasp_orientation(Grasp.TOP, [1, 1, 0, 0]) + +left_gripper.generate_all_grasp_orientations_from_front_grasp([0.5, 0.5, 0.5, 0.5]) +# left_gripper.add_grasp_orientation(Grasp.TOP, [1, 1, 0, 0]) # Add to RobotDescriptionManager rdm = RobotDescriptionManager() diff --git a/src/pycram/robot_descriptions/justin_description.py b/src/pycram/robot_descriptions/justin_description.py index 1e2c6c5f3..8018ffbb5 100644 --- a/src/pycram/robot_descriptions/justin_description.py +++ b/src/pycram/robot_descriptions/justin_description.py @@ -168,19 +168,16 @@ ################################## Neck 
################################## justin_description.add_kinematic_chain("neck", "torso4", "head2") +justin_description.set_neck("head1_joint", "head2_joint") + ################################# Grasps ################################## -left_gripper.add_grasp_orientations({Grasp.FRONT: [0.707, -0.707, 0.707, -0.707], - Grasp.LEFT: [1, 0, 0, 1], - Grasp.RIGHT: [-1, 0, 1, 1], - Grasp.TOP: [-1, 1, 0, 0]}) -right_gripper.add_grasp_orientations({Grasp.FRONT: [0.707, 0.707, 0.707, 0.707], - Grasp.LEFT: [1, 0, 0, 1], - Grasp.RIGHT: [0, 1, 1, 0], - Grasp.TOP: [1, 1, 0, 0]}) +left_gripper.generate_all_grasp_orientations_from_front_grasp([0.707, -0.707, 0.707, -0.707]) +right_gripper.generate_all_grasp_orientations_from_front_grasp([0.707, 0.707, 0.707, 0.707]) ################################# Additionals ################################## -justin_description.set_costmap_offset(0.8) +justin_description.set_costmap_offset(0.5) +justin_description.set_palm_axis([0, 0, 1]) # Add to RobotDescriptionManager rdm = RobotDescriptionManager() diff --git a/src/pycram/robot_descriptions/pr2_description.py b/src/pycram/robot_descriptions/pr2_description.py index f55a8fda4..3a20b6dca 100644 --- a/src/pycram/robot_descriptions/pr2_description.py +++ b/src/pycram/robot_descriptions/pr2_description.py @@ -1,3 +1,5 @@ +import numpy as np + from ..robot_description import RobotDescription, KinematicChainDescription, EndEffectorDescription, \ RobotDescriptionManager, CameraDescription from ..datastructures.enums import Arms, Grasp, GripperState, GripperType, TorsoState @@ -74,15 +76,19 @@ ################################## Neck ################################## pr2_description.add_kinematic_chain("neck", "torso_lift_link", "head_tilt_link") +pr2_description.set_neck("head_pan_joint", "head_tilt_joint") + ################################# Grasps ################################## -grasps = {Grasp.FRONT: [0, 0, 0, 1], - Grasp.LEFT: [0, 0, -1, 1], - Grasp.RIGHT: [0, 0, 1, 1], - Grasp.TOP: [0, 
1, 0, 1]} -right_gripper.add_grasp_orientations(grasps) -left_gripper.add_grasp_orientations(grasps) +right_gripper.generate_all_grasp_orientations_from_front_grasp([0, 0, 0, 1]) +left_gripper.generate_all_grasp_orientations_from_front_grasp([0, 0, 0, 1]) + +################################## Additionals ################################## +pr2_description.set_max_reach("torso_lift_link", "l_gripper_tool_frame") +pr2_description.set_palm_axis([1, 0, 0]) + # Add to RobotDescriptionManager rdm = RobotDescriptionManager() rdm.register_description(pr2_description) + diff --git a/src/pycram/robot_descriptions/stretch_description.py b/src/pycram/robot_descriptions/stretch_description.py index d267108b3..75d1249eb 100644 --- a/src/pycram/robot_descriptions/stretch_description.py +++ b/src/pycram/robot_descriptions/stretch_description.py @@ -39,6 +39,7 @@ neck = KinematicChainDescription("neck", "link_head", "link_head_tilt", stretch_description.urdf_object) stretch_description.add_kinematic_chain_description(neck) +stretch_description.set_neck("joint_head_pan", "joint_head_tilt") ################################## Torso ################################## torso = KinematicChainDescription("torso", "link_mast", "link_lift", @@ -69,6 +70,11 @@ Grasp.RIGHT: [0, 0, 1, 1], Grasp.TOP: [0, 1, 0, 1]}) +############################## Additionals ################################ +# stretch_description.set_costmap_offset(0.3) # set to correct offset if needed +# stretch_description.set_palm_axis([0, 0, -1]) # set to correct axis if needed +# stretch_description.set_max_reach("torso_lift_link", "gripper_left_tool_link") # set to correct links if needed + # Add to RobotDescriptionManager rdm = RobotDescriptionManager() rdm.register_description(stretch_description) diff --git a/src/pycram/robot_descriptions/tiago_description.py b/src/pycram/robot_descriptions/tiago_description.py index 708442f81..71290eb86 100644 --- a/src/pycram/robot_descriptions/tiago_description.py +++ 
b/src/pycram/robot_descriptions/tiago_description.py @@ -25,7 +25,7 @@ tiago_description.add_kinematic_chain_description(left_arm) ################################## Left Gripper ################################## -left_gripper = EndEffectorDescription("left_gripper", "gripper_left_link", "gripper_left_tool_link", +left_gripper = EndEffectorDescription("left_gripper", "wrist_left_ft_link", "gripper_left_tool_link", tiago_description.urdf_object) left_gripper.add_static_joint_states(GripperState.OPEN, {'gripper_left_left_finger_joint': 0.048, @@ -35,6 +35,7 @@ 'gripper_left_right_finger_joint': 0.0}) left_arm.end_effector = left_gripper +tiago_description.set_distance_palm_to_tool_frame(Arms.LEFT, tiago_description.get_distance_palm_to_tool_frame(Arms.LEFT)/2) ################################## Right Arm ################################## right_arm = KinematicChainDescription("right_arm", "torso_fixed_link", "arm_right_7_link", @@ -52,7 +53,7 @@ tiago_description.add_kinematic_chain_description(right_arm) ################################## Right Gripper ################################## -right_gripper = EndEffectorDescription("right_gripper", "gripper_right_link", "gripper_right_tool_link", +right_gripper = EndEffectorDescription("right_gripper", "wrist_right_ft_link", "gripper_right_tool_link", tiago_description.urdf_object) right_gripper.add_static_joint_states(GripperState.OPEN, {'gripper_right_left_finger_joint': 0.048, @@ -62,6 +63,8 @@ 'gripper_right_right_finger_joint': 0.0}) right_arm.end_effector = right_gripper +tiago_description.set_distance_palm_to_tool_frame(Arms.RIGHT, tiago_description.get_distance_palm_to_tool_frame(Arms.RIGHT)/2) + ################################## Torso ################################## torso = KinematicChainDescription("torso", "torso_fixed_link", "torso_lift_link", @@ -81,17 +84,18 @@ ################################## Neck ################################## tiago_description.add_kinematic_chain("neck", "torso_lift_link", 
"head_2_link") +tiago_description.set_neck("head_1_joint", "head_2_joint") + ################################# Grasps ################################## -grasps = {Grasp.FRONT: [-0.5, 0.5, 0.5, -0.5], - Grasp.LEFT: [0, 0, -1, 1], - Grasp.RIGHT: [0, 0, 1, 1], - Grasp.TOP: [0, 0, -0.7071068, 0.7071068]} -right_gripper.add_grasp_orientations(grasps) -left_gripper.add_grasp_orientations(grasps) +front_grasp = [-0.5, 0.5, 0.5, -0.5] +right_gripper.generate_all_grasp_orientations_from_front_grasp(front_grasp) +left_gripper.generate_all_grasp_orientations_from_front_grasp(front_grasp) ################################# Additionals ################################## -tiago_description.set_costmap_offset(0.35) +tiago_description.set_costmap_offset(0.3) +tiago_description.set_palm_axis([0, 0, -1]) +tiago_description.set_max_reach("torso_lift_link", "gripper_left_tool_link") # Add to RobotDescriptionManager rdm = RobotDescriptionManager() diff --git a/src/pycram/ros/viz_marker_publisher.py b/src/pycram/ros/viz_marker_publisher.py index 0d84ce074..9d57a29fa 100644 --- a/src/pycram/ros/viz_marker_publisher.py +++ b/src/pycram/ros/viz_marker_publisher.py @@ -333,7 +333,6 @@ def remove_marker(self, bw_object: Optional[ObjectDesignatorDescription] = None, self.marker_array.markers.pop(marker_id) # rospy.loginfo(f"Removed Marker '{name}'") - def clear_all_marker(self): """ Clear all existing markers @@ -385,16 +384,18 @@ def publish(self, poses: List[Pose], duration=15.0, length=0.1, name=None): color = self.colorclass for pose in self.poses: - self._create_line(pose, AxisIdentifier.X.value, self.duration, self.length, color.get_color_from_string('red')) - self._create_line(pose, AxisIdentifier.Y.value, self.duration, self.length, color.get_color_from_string('green')) - self._create_line(pose, AxisIdentifier.Z.value, self.duration, self.length, color.get_color_from_string('blue')) + self._create_line(pose, AxisIdentifier.X.value, self.duration, self.length, + 
color.get_color_from_string('red')) + self._create_line(pose, AxisIdentifier.Y.value, self.duration, self.length, + color.get_color_from_string('green')) + self._create_line(pose, AxisIdentifier.Z.value, self.duration, self.length, + color.get_color_from_string('blue')) self.thread.start() # rospy.loginfo("Publishing axis visualization") self.thread.join() # rospy.logdebug("Stopped Axis visualization") - def _publish(self): if self.name in self.marker_overview.keys(): self._update_marker(self.marker_overview[self.name], new_pose=self.pose) @@ -534,3 +535,177 @@ def _update_marker(self, marker_id, new_pose): # Update was not successful # rospy.logwarn(f"Marker {marker_id} not found for update") return False + + +class CostmapPublisher: + """ + Class to manually add and remove marker of objects and poses. + """ + + def __init__(self, topic_name: str = '/pycram/costmap_marker', interval: float = 0.1): + """ + The Publisher creates an Array of Visualization marker with a marker for a pose or object. + This Array is published with a rate of interval. + + :param topic_name: Name of the marker topic + :param interval: Interval at which the marker should be published + """ + self.start_time = None + self.marker_array_pub = rospy.Publisher(topic_name, Marker, queue_size=10) + + self.marker = Marker() + self.marker_overview = {} + self.current_id = 0 + + self.interval = interval + self.log_message = None + + def publish(self, poses: List[Pose], size: float = None, name: Optional[str] = None, scale: Optional[float] = 0.4): + """ + Publish a pose or an object into the MarkerArray. 
+        Prioritizes adding an object if possible + +        :param poses: List of Pose of the Costmap +        :param size: Size of the individual costmap points +        :param scale: Scale of the z-axis of the costmap +        :param name: Name of the marker +        """ + +        self.start_time = time.time() +        thread = threading.Thread(target=self._publish, args=(poses, name, size, scale)) +        thread.start() +        # rospy.loginfo(self.log_message) +        thread.join() + +    def _publish(self, poses: List[Pose], name: Optional[str] = None, +                 size=None, scale=0.4): +        """ +        Publish the marker into the MarkerArray +        """ +        stop_thread = False +        duration = 2 + +        while not stop_thread: +            if time.time() - self.start_time > duration: +                stop_thread = True +            self._publish_costmap(name=name, poses=poses, size=size, scale=scale) + +            rospy.sleep(self.interval) + +    def _publish_costmap(self, name: str, poses: List[Pose], size=None, scale=0.4): +        """ +        Publish a list of poses as a costmap marker + +        :param name: Name of the marker +        :param poses: Poses of the costmap points +        :param scale: Scale of the z-axis of the costmap +        """ + +        if name is None: +            name = 'costmap_marker' + +        if name in self.marker_overview.keys(): +            self._update_marker(self.marker_overview[name], new_poses=poses) +            return + +        self._make_marker_array(name=name, marker_type=Marker.POINTS, costmap_poses=poses, +                                marker_scales=(size, size, size), z_scale=scale) +        self.marker_array_pub.publish(self.marker) +        self.log_message = f"Pose '{name}' published" + +    def _make_marker_array(self, name, marker_type: int, costmap_poses: List[Pose], +                           marker_scales: Tuple = (1.0, 1.0, 1.0), z_scale: float = 0.4): +        """ +        Create a Marker and add it to the MarkerArray + +        :param name: Name of the Marker +        :param marker_type: Type of the marker to create +        :param costmap_poses: Poses of the costmap points +        :param marker_scales: individual scaling of the markers axes +        :param z_scale: Scale of the z-axis of the costmap +        """ + +        frame_id = "pycram/map" +        new_marker = Marker() +        new_marker.id = self.current_id +        new_marker.header.frame_id = frame_id +        new_marker.ns = name + 
new_marker.header.stamp = rospy.Time.now() +        new_marker.type = marker_type +        new_marker.action = Marker.ADD +        for costmap_pose in costmap_poses: +            color_rgba = Color.gaussian_color_map(costmap_pose.position.z, 0, z_scale) +            new_marker.scale.x = marker_scales[0] +            new_marker.scale.y = marker_scales[1] +            new_marker.scale.z = marker_scales[2] +            new_marker.colors.append(color_rgba) + +            point = Point() +            point.x = costmap_pose.position.x +            point.y = costmap_pose.position.y +            point.z = costmap_pose.position.z +            new_marker.points.append(point) + +        self.marker = new_marker +        self.marker_overview[name] = new_marker.id +        self.current_id += 1 + +    def _update_marker(self, marker_id: int, new_poses: List[Pose]) -> bool: +        """ +        Update an existing marker to a new set of poses + +        :param marker_id: id of the marker that should be updated +        :param new_poses: Poses where the updated marker is set + +        :return: True if update was successful, False otherwise +        """ + +        # Find the marker with the specified ID +        if self.marker.id == marker_id: +            # Update successful +            new_points = [] +            for new_pose in new_poses: +                self.marker.points = [] +                point = Point() +                point.x = new_pose.position.x +                point.y = new_pose.position.y +                point.z = new_pose.position.z +                new_points.append(point) +            self.marker.points = new_points +            self.log_message = f"Marker '{self.marker.ns}' updated" +            self.marker_array_pub.publish(self.marker) +            return True + +        # Update was not successful +        # rospy.logwarn(f"Marker {marker_id} not found for update") +        return False + +    def remove_marker(self, name: Optional[str] = None): +        """ +        Remove a marker by name + +        :param name: Name of the marker that should be removed +        """ + +        if name is None: +            # rospy.logerr('No name for object given, cannot remove marker') +            return + +        marker_id = self.marker_overview.pop(name) + +        if self.marker.id == marker_id: +            self.marker.action = Marker.DELETE + + 
+            self.marker_array_pub.publish(self.marker) +        # rospy.loginfo(f"Removed Marker '{name}'") + +    def clear_all_marker(self): +        """ +        Clear all existing markers +        """ +        self.marker.action = Marker.DELETE + +        self.marker_overview = {} +        self.marker_array_pub.publish(self.marker) + +        # rospy.loginfo('Removed all markers') diff --git a/src/pycram/utils.py b/src/pycram/utils.py index 3a611cd8d..8a2d523a8 100644 --- a/src/pycram/utils.py +++ b/src/pycram/utils.py @@ -6,15 +6,21 @@ Classes: GeneratorList -- implementation of generator list wrappers. """ +import sys from inspect import isgeneratorfunction + +import numpy as np from typing_extensions import List, Tuple, Callable import os + +from urdf_parser_py.urdf import URDF + from .datastructures.pose import Pose import math from typing_extensions import Dict +from scipy.spatial.transform import Rotation as R class bcolors: @@ -116,6 +122,78 @@ def axis_angle_to_quaternion(axis: List, angle: float) -> Tuple: return (x, y, z, w) +def translate_relative_to_object(obj_pose, palm_axis, translation_value) -> Pose: +    """ +    Translates a copy of the given object pose backwards along the gripper's palm axis. + +    Args: +        obj_pose: The current pose of the object relative to the gripper. +        palm_axis: A list [x, y, z] where one value is 1 or -1, and the others are 0. +        translation_value: The magnitude of the retreat in meters. + +    Returns: +        Pose: A translated copy of obj_pose; the input pose is not modified.
+ """ + object_pose = obj_pose.copy() + local_retraction = np.array([palm_axis[0] * translation_value, + palm_axis[1] * translation_value, + palm_axis[2] * translation_value]) + + quat = object_pose.orientation_as_list() + + rotation_matrix = R.from_quat(quat).as_matrix() + + retraction_world = rotation_matrix @ local_retraction + + object_pose.pose.position.x -= retraction_world[0] + object_pose.pose.position.y -= retraction_world[1] + object_pose.pose.position.z -= retraction_world[2] + + return object_pose + + +def on_error_do_nothing(message): + pass + + +def load_urdf_silently(urdf_path_or_string: str, from_string_instead_of_file: bool = False) -> URDF: + """ + Loads a URDF file or XML string with suppressed error messages. + + This function temporarily overrides the `on_error` function in the `urdf_parser_py.xml_reflection.core` module + to suppress warnings and error messages during URDF parsing. + + Args: + urdf_path_or_string (str): Path to the URDF file or XML string if `from_string_instead_of_file` is True. + from_string_instead_of_file (bool, optional): If True, interprets `urdf_path_or_string` as an XML string. + Defaults to False. + + Returns: + URDF: Parsed URDF object. + + Raises: + ImportError: If the `urdf_parser_py.xml_reflection.core` module is not found. + """ + urdf_core_module = sys.modules.get('urdf_parser_py.xml_reflection.core') + if urdf_core_module is None: + raise ImportError( + "Could not locate `urdf_parser_py.xml_reflection.core` module. 
Please check how the module changed since " + "'https://github.com/ros/urdf_parser_py/blob/3bcb9051e3bc6ebb8bff0bf8dd2c2281522b05d9/src/urdf_parser_py/xml_reflection/core.py#L33'") + + original_on_error = getattr(urdf_core_module, 'on_error', None) + urdf_core_module.on_error = on_error_do_nothing + + try: + if from_string_instead_of_file: + return URDF.from_xml_string(urdf_path_or_string) + else: + return URDF.from_xml_file(urdf_path_or_string) + finally: + if original_on_error is not None: + urdf_core_module.on_error = original_on_error + + class suppress_stdout_stderr(object): """ A context manager for doing a "deep suppression" of stdout and stderr in diff --git a/src/pycram/worlds/bullet_world.py b/src/pycram/worlds/bullet_world.py index a840b753c..51e273fb7 100755 --- a/src/pycram/worlds/bullet_world.py +++ b/src/pycram/worlds/bullet_world.py @@ -34,6 +34,8 @@ class is the main interface to the Bullet Physics Engine and should be used to s extension: str = ObjectDescription.get_file_extension() + allow_publish_debug_poses: bool = False + # Check is for sphinx autoAPI to be able to work in a CI workflow if rosgraph.is_master_online(): # and "/pycram" not in rosnode.get_node_names(): rospy.init_node('pycram')