From ddcd47a66361e92059ad724f5ca499d8b35f4124 Mon Sep 17 00:00:00 2001
From: lukestroh
Date: Mon, 11 Nov 2024 13:01:02 -0800
Subject: [PATCH] formatting

---
 README.md                                 |  3 ++
 main.py                                   | 13 +++--
 pybullet_tree_sim/camera.py               | 12 +++--
 pybullet_tree_sim/pruning_environment.py  |  5 +-
 pybullet_tree_sim/tree.py                 |  4 +-
 .../urdf/robot/generic/robot.urdf.xacro   | 54 ++++++++++---------
 pybullet_tree_sim/utils/camera_helpers.py |  1 +
 pybullet_tree_sim/utils/pyb_utils.py      |  4 +-
 test/test_camera.py                       |  4 +-
 test/test_helpers.py                      | 11 ++--
 10 files changed, 58 insertions(+), 53 deletions(-)

diff --git a/README.md b/README.md
index 6336e99..991873a 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,6 @@
+urdf generic launch CLI test:
+`xacro robot.urdf.xacro > test.urdf end_effector_type:=mock_pruner eef_parent:=ur5e__tool0 arm_type:=ur5 ur_type:=ur5e tf_prefix:=ur5e__ base_attachment_type:=linear_slider`
+
 ## TODO
 1. For Claire:
     1. Generic URDF to Generic Robot class
diff --git a/main.py b/main.py
index def576c..8d3fb03 100644
--- a/main.py
+++ b/main.py
@@ -21,8 +21,8 @@ def main():
     penv = PruningEnv(
         pbutils=pbutils, load_robot=True, robot_pos=[0, 1, 0], verbose=True, cam_width=cam_width, cam_height=cam_height
     )
-
-    penv.activate_shape(shape="cylinder", radius=2*0.0254, height=2.0, orientation=[0,np.pi/2,0])
+
+    penv.activate_shape(shape="cylinder", radius=2 * 0.0254, height=2.0, orientation=[0, np.pi / 2, 0])
     # penv.load_tree(
     #     pbutils=pbutils,
     #     scale=1.0,
@@ -39,15 +39,15 @@ def main():
     for i in range(100):
         pbutils.pbclient.stepSimulation()
         time.sleep(0.1)
-
+
     # Simulation loop
     while True:
         try:
-            view_matrix = penv.ur5.get_view_mat_at_curr_pose(0,0,[0,0,0])
-            rgbd = penv.pbutils.get_rgbd_at_cur_pose(type='robot', view_matrix=view_matrix)
+            view_matrix = penv.ur5.get_view_mat_at_curr_pose(0, 0, [0, 0, 0])
+            rgbd = penv.pbutils.get_rgbd_at_cur_pose(type="robot", view_matrix=view_matrix)
             keys_pressed = penv.get_key_pressed()
             action = penv.get_key_action(keys_pressed=keys_pressed)
-            action = action.reshape((6,1))
+            action = action.reshape((6, 1))
             jv, jacobian = penv.ur5.calculate_joint_velocities_from_ee_velocity_dls(end_effector_velocity=action)
             penv.ur5.action = jv
             singularity = penv.ur5.set_joint_velocities(penv.ur5.action)
@@ -63,4 +63,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-
\ No newline at end of file
diff --git a/pybullet_tree_sim/camera.py b/pybullet_tree_sim/camera.py
index 35edf60..a648375 100644
--- a/pybullet_tree_sim/camera.py
+++ b/pybullet_tree_sim/camera.py
@@ -22,12 +22,12 @@ def __init__(self, pbutils, sensor_name: str, sensor_type: str = "camera") -> No
         super().__init__(sensor_name, sensor_type)
         self.pan = 0
         self.tilt = 0
-        self.tf_frame: str = ''
+        self.tf_frame: str = ""
 
         # Only dealing with depth data for now, TODO: add RGB data
         self.depth_width = self.params["depth"]["width"]
         self.depth_height = self.params["depth"]["height"]
-
+
         # Some optical sensors only provide diagonal field of view, get horizontal and vertical from diagonal
         try:
             vfov = self.params["depth"]["vfov"]
@@ -37,9 +37,11 @@ def __init__(self, pbutils, sensor_name: str, sensor_type: str = "camera") -> No
         far_val = self.params["depth"]["far_plane"]
 
         self.depth_pixel_coords = np.array(list(np.ndindex((self.depth_width, self.depth_height))), dtype=int)
-        self.depth_film_coords = 2 * (
-            self.depth_pixel_coords + np.array([0.5, 0.5]) - np.array([self.depth_width / 2, self.depth_height / 2])
-        ) / np.array([self.depth_width, self.depth_height])
+        self.depth_film_coords = (
+            2
+            * (self.depth_pixel_coords + np.array([0.5, 0.5]) - np.array([self.depth_width / 2, self.depth_height / 2]))
+            / np.array([self.depth_width, self.depth_height])
+        )
         self.depth_proj_mat = pbutils.pbclient.computeProjectionMatrixFOV(
             fov=vfov, aspect=(self.depth_width / self.depth_height), nearVal=near_val, farVal=far_val
         )
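
Note: the reflowed depth_film_coords expression in the camera.py hunk above is a layout-only change; the arithmetic is unchanged. For reference, a small standalone sketch (not part of the patch; the width and height values below are illustrative) of the mapping it performs from integer pixel indices to film coordinates in roughly (-1, 1):

import numpy as np

# Illustrative sensor size; the real values come from the sensor's depth params.
depth_width, depth_height = 4, 3

# Every (u, v) pixel index, as in the np.ndindex call above.
pixel_coords = np.array(list(np.ndindex((depth_width, depth_height))), dtype=int)

# Shift to pixel centers, recenter on the image midpoint, then scale so both
# axes land strictly inside (-1, 1).
film_coords = (
    2
    * (pixel_coords + np.array([0.5, 0.5]) - np.array([depth_width / 2, depth_height / 2]))
    / np.array([depth_width, depth_height])
)

print(film_coords.min(axis=0), film_coords.max(axis=0))  # approx [-0.75 -0.67] and [0.75 0.67]
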
diff --git a/pybullet_tree_sim/pruning_environment.py b/pybullet_tree_sim/pruning_environment.py
index 6042f43..2a624e4 100644
--- a/pybullet_tree_sim/pruning_environment.py
+++ b/pybullet_tree_sim/pruning_environment.py
@@ -398,7 +398,9 @@ def deproject_pixels_to_points(
 
         plot = False
         if plot:
-            self.debug_plots(camera=camera, data=data, cam_coords=cam_coords, world_coords=world_coords, view_matrix=view_matrix)
+            self.debug_plots(
+                camera=camera, data=data, cam_coords=cam_coords, world_coords=world_coords, view_matrix=view_matrix
+            )
 
         return world_coords
 
@@ -562,7 +564,6 @@ def get_key_action(self, keys_pressed: list) -> np.ndarray:
             keys_pressed = []
         return action
 
-
     def run_sim(self) -> int:
         return 0
 
diff --git a/pybullet_tree_sim/tree.py b/pybullet_tree_sim/tree.py
index d2d1ce6..bc9d05f 100644
--- a/pybullet_tree_sim/tree.py
+++ b/pybullet_tree_sim/tree.py
@@ -447,10 +447,10 @@ def load_tree_urdf(
         urdf_path = os.path.join(Tree._tree_generated_urdf_path, f"{namespace}{tree_type}_tree{tree_id}.urdf")
         if not os.path.exists(urdf_path):
             log.info(f"Could not find file '{urdf_path}'. Generating URDF from xacro.")
-
+
             if not os.path.isdir(Tree._tree_generated_urdf_path):
                 os.mkdir(Tree._tree_generated_urdf_path)
-
+
             urdf_mappings = {
                 "namespace": namespace,
                 "tree_id": str(tree_id),
diff --git a/pybullet_tree_sim/urdf/robot/generic/robot.urdf.xacro b/pybullet_tree_sim/urdf/robot/generic/robot.urdf.xacro
index e5648cb..b9f01a6 100644
--- a/pybullet_tree_sim/urdf/robot/generic/robot.urdf.xacro
+++ b/pybullet_tree_sim/urdf/robot/generic/robot.urdf.xacro
@@ -7,18 +7,18 @@
- + - - + + - +
@@ -26,10 +26,10 @@
- + - + - +
@@ -84,20 +84,20 @@
- + - + - + - + - + - +
@@ -181,32 +181,34 @@
         fake_sensor_commands="$(arg fake_sensor_commands)"
         gazebo_effort="$(arg gazebo_effort)">
- + - + - + -->
- + - + - + + + - - + +
- -
\ No newline at end of file
+ +
diff --git a/pybullet_tree_sim/utils/camera_helpers.py b/pybullet_tree_sim/utils/camera_helpers.py
index 3e87f33..ce374a4 100644
--- a/pybullet_tree_sim/utils/camera_helpers.py
+++ b/pybullet_tree_sim/utils/camera_helpers.py
@@ -39,6 +39,7 @@ def get_fov_from_dfov(camera_width: int, camera_height: int, dFoV: Union[int, fl
     fov_w = 2 * np.arctan(np.tan(_dfov / 2) * camera_width / camera_diag)
     return (np.rad2deg(fov_w), np.rad2deg(fov_h))
 
+
 # def get_pyb_proj_mat(vfov: float, aspect: float, nearVal: float, farVal: float):
 #     return pbutils.pbclient.computeProjectionMatrixFOV(
 #         fov=vfov, aspect=(self.depth_width / self.depth_height), nearVal=near_val, farVal=far_val
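
Note: get_fov_from_dfov, touched in the camera_helpers.py hunk above, derives the horizontal field of view from a diagonal field of view via fov_w = 2 * arctan(tan(dfov / 2) * width / diag). A rough standalone sketch of that relationship (not part of the patch; the function name, the symmetric fov_h term, and the internal degrees-to-radians conversion are assumptions made here for illustration):

import numpy as np

def fov_from_dfov(width: int, height: int, dfov_deg: float) -> tuple:
    # Mirrors the fov_w formula shown in camera_helpers.get_fov_from_dfov;
    # the vertical term is assumed to be symmetric in height.
    dfov = np.deg2rad(dfov_deg)
    diag = np.sqrt(width**2 + height**2)
    fov_w = 2 * np.arctan(np.tan(dfov / 2) * width / diag)
    fov_h = 2 * np.arctan(np.tan(dfov / 2) * height / diag)
    return (np.rad2deg(fov_w), np.rad2deg(fov_h))

print(fov_from_dfov(640, 480, 65.0))  # approximately (54.0, 41.9) degrees
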
cannot be None for robot view") return self.pbclient.getCameraImage( - width=camera.depth_width, # TODO: make separate function for rgb? + width=camera.depth_width, # TODO: make separate function for rgb? height=camera.depth_height, viewMatrix=view_matrix, projectionMatrix=camera.depth_proj_mat, diff --git a/test/test_camera.py b/test/test_camera.py index 277d7f8..d3b5635 100644 --- a/test/test_camera.py +++ b/test/test_camera.py @@ -3,8 +3,8 @@ import unittest + class TestCamera(unittest.TestCase): def test_deproject_transform(self): - + return - \ No newline at end of file diff --git a/test/test_helpers.py b/test/test_helpers.py index ec36693..5ce5d09 100644 --- a/test/test_helpers.py +++ b/test/test_helpers.py @@ -6,7 +6,6 @@ class TestHelpers(unittest.TestCase): - def test_negative_dfov(self): dfov = -65 cam_w = 100 @@ -15,7 +14,7 @@ def test_negative_dfov(self): with self.assertRaises(ValueError) as cm: helpers.get_fov_from_dfov(cam_w, cam_h, dfov) return - + def test_negative_cam_width(self): dfov = 65 cam_w = -100 @@ -24,7 +23,7 @@ def test_negative_cam_width(self): with self.assertRaises(ValueError) as cm: helpers.get_fov_from_dfov(cam_w, cam_h, dfov) return - + def test_negative_cam_height(self): dfov = 65 cam_w = 100 @@ -33,10 +32,6 @@ def test_negative_cam_height(self): with self.assertRaises(ValueError) as cm: helpers.get_fov_from_dfov(cam_w, cam_h, dfov) return - - - - # class TestStringMethods(unittest.TestCase): @@ -55,5 +50,5 @@ def test_negative_cam_height(self): # with self.assertRaises(TypeError): # s.split(2) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main()