Support UOAIS (beta)
SeungBack committed Aug 22, 2021
1 parent 6bfd1cc commit bec1a4f
Showing 9 changed files with 74 additions and 11 deletions.
3 changes: 3 additions & 0 deletions CMakeLists.txt
@@ -13,6 +13,7 @@ find_package(catkin REQUIRED COMPONENTS
std_msgs
message_generation
geometry_msgs
sensor_msgs
)

## System dependencies are found with CMake's conventions
@@ -57,6 +58,7 @@ add_message_files(
## Generate services in the 'srv' folder
add_service_files(
FILES
GetTargetContactGraspSegm.srv
GetTargetContactGrasp.srv
GetTargetPos.srv
)
@@ -73,6 +75,7 @@ generate_messages(
DEPENDENCIES
std_msgs
geometry_msgs
sensor_msgs
)

################################################
22 changes: 15 additions & 7 deletions README.md
@@ -12,7 +12,15 @@ To generate a grasping point, simply use `get_target_grasp_pose` service
rosservice call /get_target_grasp_pose
```
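
For reference, the same service can be called from Python. A minimal client sketch (not part of this commit; the `target_grasps` response field name is assumed from the new GetTargetContactGraspSegm.srv below):
```
#!/usr/bin/env python
# Minimal client sketch for /get_target_grasp_pose (illustration only).
# Assumes the deep_grasping_ros/GetTargetContactGrasp service type used by
# contact_grasp_server.py and a `target_grasps` response field.
import rospy
from deep_grasping_ros.srv import GetTargetContactGrasp

rospy.init_node("grasp_pose_client")
rospy.wait_for_service("/get_target_grasp_pose")
get_grasp = rospy.ServiceProxy("/get_target_grasp_pose", GetTargetContactGrasp)
resp = get_grasp()
for g in resp.target_grasps:
    rospy.loginfo("grasp %s: score=%.3f, frame=%s",
                  g.id, g.score, g.transform.child_frame_id)
```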

Before running the deep grasping nodes, launch the camera node:
```
ROS_NAMESPACE=azure1 roslaunch azure_kinect_ros_driver driver.launch color_resolution:=1440P depth_mode:=WFOV_UNBINNED fps:=5 tf_prefix:=azure1_
```

## TODO

- Migrate from Python 2 to Python 3
- Remove TCP connections

## GQ-CNN

@@ -23,7 +31,7 @@

GQ-CNN Server
```
rosrun deep_grasping_ros fc_gqcnn_server.py
rosrun deep_grasping_ros gqcnn_server.py
```

GQ-CNN Client
@@ -34,9 +42,9 @@ roscd deep_grasping_ros/src/gqcnn && python gqcnn_client.py

FC-GQ-CNN Client
```
conda activate gqcnn \
&& roscd deep_grasping_ros/src/gqcnn \
&& python fc_gqcnn_client.py
conda activate gqcnn && \
roscd deep_grasping_ros/src/gqcnn && \
python fc_gqcnn_client.py
```


@@ -93,15 +101,15 @@ ROS_NAMESPACE=azure1 roslaunch azure_kinect_ros_driver driver.launch color_resol

contact graspnet server
```
rosrun deep_grasping_ros contact_grasp_server.py
ros27 && roscd deep_grasping_ros && python src/contact_grasp_server.py
```

contact graspnet client

```
ros && conda activate contact_graspnet_env \
conda activate contact_graspnet_env \
&& roscd deep_grasping_ros/src/contact_graspnet \
&& CUDA_VISIBLE_DEVICES=1 python contact_graspnet/contact_grasp_client.py --local_regions --filter_grasps
&& CUDA_VISIBLE_DEVICES=0 python contact_graspnet/contact_grasp_client.py --local_regions --filter_grasps
```

Binary file added color.npy
Binary file not shown.
1 change: 1 addition & 0 deletions msg/Grasp.msg
@@ -1,3 +1,4 @@
string id
float64 score
float64 width
geometry_msgs/TransformStamped transform
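
For illustration, the extended message (with the new `width` field) can be filled like this; values are placeholders, mirroring how gqcnn_server.py populates it below:
```
# Illustrative sketch only: populate the extended Grasp message.
from deep_grasping_ros.msg import Grasp
from geometry_msgs.msg import TransformStamped

grasp = Grasp()
grasp.id = "obj"                      # object identifier
grasp.score = 0.87                    # grasp quality, e.g. a GQ-CNN q_value
grasp.width = 0.05                    # gripper opening reported by the planner
grasp.transform = TransformStamped()  # camera-to-grasp transform
```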
Binary file added segmap.npy
Binary file not shown.
18 changes: 15 additions & 3 deletions src/contact_grasp_server.py
@@ -33,7 +33,8 @@ def __init__(self):
self.camera_info = rospy.wait_for_message("/azure1/rgb/camera_info", CameraInfo)
self.data = {}
self.data["intrinsics_matrix"] = np.array(self.camera_info.K).reshape(3, 3)

rospy.loginfo("Successfully got camera info")

self.bridge = cv_bridge.CvBridge()
self.grasp_srv = rospy.Service('/get_target_grasp_pose', GetTargetContactGrasp, self.get_target_grasp_pose)

@@ -55,6 +56,9 @@ def __init__(self):
mid_point = 0.5*(control_points[1, :] + control_points[2, :])
self.grasp_line_plot = np.array([np.zeros((3,)), mid_point, control_points[1], control_points[3],
control_points[1], control_points[2], control_points[4]])

self.uoais_vm_pub = rospy.Publisher("uoais/visible_mask", Image, queue_size=1)
self.uoais_am_pub = rospy.Publisher("uoais/amodal_mask", Image, queue_size=1)


def get_target_grasp_pose(self, msg):
@@ -86,7 +90,13 @@ def get_target_grasp_pose(self, msg):
# send image to dnn client
rospy.loginfo_once("Sending rgb, depth to Contact-GraspNet client")
su.sendall_pickle(self.grasp_sock, self.data)
pred_grasps_cam, scores, contact_pts = su.recvall_pickle(self.grasp_sock)
pred_grasps_cam, scores, contact_pts, segm_result = su.recvall_pickle(self.grasp_sock)
segmap, amodal_vis, visible_vis, occlusions = \
segm_result["segm"], segm_result["amodal_vis"], segm_result["visible_vis"], segm_result["occlusions"]
self.uoais_vm_pub.publish(self.bridge.cv2_to_imgmsg(visible_vis))
self.uoais_am_pub.publish(self.bridge.cv2_to_imgmsg(amodal_vis))


if pred_grasps_cam is None:
rospy.logwarn_once("No grasps are detected")
return None
@@ -122,7 +132,7 @@ def get_target_grasp_pose(self, msg):
grasp.transform = t_target_grasp
grasps.append(grasp)

return GetTargetContactGraspResponse(grasps)
return GetTargetContactGraspResponse(grasps, segmap, occlusions)

def initialize_marker(self):
# Delete all existing markers
@@ -134,6 +144,7 @@ def initialize_marker(self):

def publish_marker(self, pts, frame_id, score):


markers = []
for i in range(pts.shape[1]):
marker = Marker()
@@ -203,6 +214,7 @@ def publish_marker(self, pts, frame_id, score):
marker_array.markers = markers
self.marker_pub.publish(marker_array)


if __name__ == '__main__':

grasp_net = ContactGraspNet()
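
The visible and amodal masks published on the new uoais/* topics can be inspected with a small subscriber; a sketch (not part of this commit):
```
# Sketch of a subscriber for the UOAIS debug topics published by
# contact_grasp_server.py (uoais/visible_mask, uoais/amodal_mask).
import cv_bridge
import rospy
from sensor_msgs.msg import Image

bridge = cv_bridge.CvBridge()

def on_visible_mask(msg):
    mask = bridge.imgmsg_to_cv2(msg)
    rospy.loginfo_throttle(5, "visible mask: shape=%s dtype=%s" % (mask.shape, mask.dtype))

rospy.init_node("uoais_mask_listener")
rospy.Subscriber("uoais/visible_mask", Image, on_visible_mask, queue_size=1)
rospy.spin()
```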
5 changes: 4 additions & 1 deletion src/gqcnn_server.py
@@ -106,8 +106,10 @@ def get_target_grasp_pose(self, msg):
pos = pred_grasp["pose"].position
cx = int(pred_grasp["x"])
cy = int(pred_grasp["y"])
z = depth[cy, cx] - 0.09
z = depth[cy, cx] - 0.1034
# z = pred_grasp["depth"] - 0.1034
if z == 0:
print("oh my god!!!!!!!!11")
z = np.median(depth[cy-5:cy+5, cx-5:cx+5])
r = R.from_rotvec(pred_grasp["angle"] * np.array([0, 0, 1]))
H_cam_to_target[:3, :3] = r.as_dcm()
@@ -136,6 +138,7 @@ def get_target_grasp_pose(self, msg):
grasp.id = "obj"
grasp.score = pred_grasp["q_value"]
grasp.transform = t_target_grasp
grasp.width = pred_grasp["width"]
grasps.append(grasp)

return GetTargetContactGraspResponse(grasps)
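
The depth handling above (a fixed z-offset plus a median fallback for missing depth) can be summarized as a standalone helper. In this sketch the raw depth is checked before the offset is subtracted, which appears to be the intent of the `z == 0` guard; the offset and window size are taken from the code above:
```
import numpy as np

def grasp_depth(depth, cx, cy, offset=0.1034, half_win=5):
    # Depth at the grasp center; fall back to a local median when the
    # reading is missing (zero), then apply the fixed gripper offset.
    z = depth[cy, cx]
    if z == 0:
        z = np.median(depth[cy - half_win:cy + half_win, cx - half_win:cx + half_win])
    return z - offset
```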
31 changes: 31 additions & 0 deletions src/test.py
@@ -0,0 +1,31 @@
import imgviz
import matplotlib.pyplot as plt
import numpy as np

# Quick offline check: overlay the saved instance segmentation map
# (segmap.npy) on the saved color image (color.npy).
color = np.load("/home/demo/catkin_ws/src/deep_grasping_ros/color.npy")
segmap = np.load("/home/demo/catkin_ws/src/deep_grasping_ros/segmap.npy")

# One boolean mask per instance id; id 0 is background.
masks = []
for inst_id in np.unique(segmap):
    if inst_id == 0.0:
        continue
    masks.append(segmap == inst_id)
masks = np.array(masks)

insviz = imgviz.instances2rgb(
    image=color,
    masks=masks,
    labels=np.uint8(np.unique(segmap))[1:],
)

# Render with matplotlib and grab the figure back as a numpy image.
plt.figure(dpi=200)
plt.imshow(insviz)
img = imgviz.io.pyplot_to_numpy()
plt.close()
print(img.shape, np.max(img))
5 changes: 5 additions & 0 deletions srv/GetTargetContactGraspSegm.srv
@@ -0,0 +1,5 @@
---
deep_grasping_ros/Grasp[] target_grasps
sensor_msgs/Image segmap
int32[] occlusions
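
A client for the new service could decode the response roughly as follows (a sketch; it assumes a node advertises GetTargetContactGraspSegm under /get_target_grasp_pose, and that `occlusions` holds one flag per instance):
```
# Hedged sketch: call the GetTargetContactGraspSegm service and convert the
# returned segmentation map (sensor_msgs/Image) back to a numpy array.
import cv_bridge
import rospy
from deep_grasping_ros.srv import GetTargetContactGraspSegm

rospy.init_node("grasp_segm_client")
rospy.wait_for_service("/get_target_grasp_pose")
srv = rospy.ServiceProxy("/get_target_grasp_pose", GetTargetContactGraspSegm)
resp = srv()

segmap = cv_bridge.CvBridge().imgmsg_to_cv2(resp.segmap)  # per-pixel instance ids
print("grasps:", len(resp.target_grasps),
      "occlusion flags:", list(resp.occlusions),
      "segmap shape:", segmap.shape)
```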
