From 7d42a169f512f9dcaffb8bc6c32f5a346e8bd9a3 Mon Sep 17 00:00:00 2001
From: davidliyutong <32391509+davidliyutong@users.noreply.github.com>
Date: Fri, 2 Aug 2024 00:37:10 +0800
Subject: [PATCH 1/2] Add APIs to get the extrinsic matrix, distortion
 parameters, and image timestamps; these are very helpful for robotics
 researchers

Changes:
- [feature] add `get_timestamp_usec` method to image
- [feature] add `get_device_timestamp_usec` to image
- [feature] add `get_system_timestamp_nsec` to image
- [feature] add `get_extrinsic_matrix` to calibration, for getting the extrinsic matrix of each camera
- [feature] add `get_distortion_parameters` to calibration, for getting the distortion parameters of each camera
---
 pykinect_azure/k4a/calibration.py | 42 +++++++++++++++++++++++++++++++
 pykinect_azure/k4a/image.py       | 22 ++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/pykinect_azure/k4a/calibration.py b/pykinect_azure/k4a/calibration.py
index e84f77e..2db4fe6 100644
--- a/pykinect_azure/k4a/calibration.py
+++ b/pykinect_azure/k4a/calibration.py
@@ -1,5 +1,7 @@
 import ctypes
 
+import numpy as np
+
 from pykinect_azure.k4a import _k4a
 
 
@@ -10,6 +12,8 @@ def __init__(self, calibration_handle: _k4a.k4a_calibration_t):
         self._handle = calibration_handle
         self.color_params = self._handle.color_camera_calibration.intrinsics.parameters.param
         self.depth_params = self._handle.depth_camera_calibration.intrinsics.parameters.param
+        self.color_extrinsics = self._handle.color_camera_calibration.extrinsics
+        self.depth_extrinsics = self._handle.depth_camera_calibration.extrinsics
 
     def __del__(self):
 
@@ -47,6 +51,44 @@ def get_matrix(self, camera: _k4a.k4a_calibration_type_t):
                 [0, self.depth_params.fy, self.depth_params.cy],
                 [0, 0, 1]]
 
+    def get_extrinsic_matrix(self):
+        color_rotation = np.array(list(self.color_extrinsics.rotation)).reshape(3, 3)
+        depth_rotation = np.array(list(self.depth_extrinsics.rotation)).reshape(3, 3)
+        color_translation = np.array(list(self.color_extrinsics.translation)) * 1e-3
+        depth_translation = np.array(list(self.depth_extrinsics.translation)) * 1e-3
+
+        color_matrix = np.eye(4)
+        color_matrix[:3, :3] = color_rotation
+        color_matrix[:3, 3] = color_translation
+        depth_matrix = np.eye(4)
+        depth_matrix[:3, :3] = depth_rotation
+        depth_matrix[:3, 3] = depth_translation
+
+        return {
+            "color": color_matrix.tolist(),
+            "depth": depth_matrix.tolist()
+        }
+
+    def get_distortion_parameters(self):
+        return {
+            "color": {
+                "k": [self.color_params.k1, self.color_params.k2, self.color_params.k3, self.color_params.k4,
+                      self.color_params.k5, self.color_params.k6],
+                "p": [self.color_params.p1, self.color_params.p2],
+                "codx": self.color_params.codx,
+                "cody": self.color_params.cody,
+                "metric_radius": self.color_params.metric_radius
+            },
+            "depth": {
+                "k": [self.depth_params.k1, self.depth_params.k2, self.depth_params.k3, self.depth_params.k4,
+                      self.depth_params.k5, self.depth_params.k6],
+                "p": [self.depth_params.p1, self.depth_params.p2],
+                "codx": self.depth_params.codx,
+                "cody": self.depth_params.cody,
+                "metric_radius": self.depth_params.metric_radius
+            }
+        }
+
     def is_valid(self):
         return self._handle
 
diff --git a/pykinect_azure/k4a/image.py b/pykinect_azure/k4a/image.py
index e08ffca..6d9d13e 100644
--- a/pykinect_azure/k4a/image.py
+++ b/pykinect_azure/k4a/image.py
@@ -1,3 +1,5 @@
+import ctypes
+
 import numpy as np
 import cv2
 
@@ -55,6 +57,17 @@ def format(self):
     def size(self):
         return self.get_size()
 
+    @property
+    def timestamp_usec(self):
+        return self.get_timestamp_usec()
+
+    @property
+    def device_timestamp_usec(self):
+        return self.get_device_timestamp_usec()
+
+    @property
+    def system_timestamp_nsec(self):
+        return self.get_system_timestamp_nsec()
 
     def get_buffer(self):
         if not self._handle:
@@ -89,6 +102,15 @@ def get_height_pixels(self):
     def get_stride_bytes(self):
         return int(_k4a.k4a_image_get_stride_bytes(self._handle))
 
+    def get_timestamp_usec(self):
+        return _k4a.k4a_image_get_timestamp_usec(self._handle)
+
+    def get_device_timestamp_usec(self):
+        return _k4a.k4a_image_get_device_timestamp_usec(self._handle)
+
+    def get_system_timestamp_nsec(self):
+        return _k4a.k4a_image_get_system_timestamp_nsec(self._handle)
+
     def to_numpy(self):
 
         if not self.is_valid():

From c91e4338a5e9f1e4fd2687d7d18e68ecaa448269 Mon Sep 17 00:00:00 2001
From: davidliyutong <32391509+davidliyutong@users.noreply.github.com>
Date: Fri, 23 Aug 2024 22:55:54 +0800
Subject: [PATCH 2/2] Adjust the extrinsic and distortion APIs, add 16-bit
 depth support

refactor: `get_extrinsic_matrix` now takes a camera parameter
refactor: `get_distortion_parameters` now takes a camera parameter
feat: add `create_custom16` / `create_bgra32` APIs to support 16-bit depth
---
 pykinect_azure/k4a/calibration.py    | 49 ++++++++++++++++------------
 pykinect_azure/k4a/image.py          | 53 ++++++++++++++++++++++++++++-
 pykinect_azure/k4a/transformation.py |  2 ++
 3 files changed, 78 insertions(+), 26 deletions(-)

diff --git a/pykinect_azure/k4a/calibration.py b/pykinect_azure/k4a/calibration.py
index 2db4fe6..de1e5dd 100644
--- a/pykinect_azure/k4a/calibration.py
+++ b/pykinect_azure/k4a/calibration.py
@@ -51,43 +51,42 @@ def get_matrix(self, camera: _k4a.k4a_calibration_type_t):
                 [0, self.depth_params.fy, self.depth_params.cy],
                 [0, 0, 1]]
 
-    def get_extrinsic_matrix(self):
-        color_rotation = np.array(list(self.color_extrinsics.rotation)).reshape(3, 3)
-        depth_rotation = np.array(list(self.depth_extrinsics.rotation)).reshape(3, 3)
-        color_translation = np.array(list(self.color_extrinsics.translation)) * 1e-3
-        depth_translation = np.array(list(self.depth_extrinsics.translation)) * 1e-3
-
-        color_matrix = np.eye(4)
-        color_matrix[:3, :3] = color_rotation
-        color_matrix[:3, 3] = color_translation
-        depth_matrix = np.eye(4)
-        depth_matrix[:3, :3] = depth_rotation
-        depth_matrix[:3, 3] = depth_translation
-
-        return {
-            "color": color_matrix.tolist(),
-            "depth": depth_matrix.tolist()
-        }
-
-    def get_distortion_parameters(self):
-        return {
-            "color": {
+    def get_extrinsic_matrix(self, camera: _k4a.k4a_calibration_type_t):
+        if camera == _k4a.K4A_CALIBRATION_TYPE_COLOR:
+            color_rotation = np.array(list(self.color_extrinsics.rotation)).reshape(3, 3)
+            color_translation = np.array(list(self.color_extrinsics.translation)) * 1e-3
+            color_matrix = np.eye(4)
+            color_matrix[:3, :3] = color_rotation
+            color_matrix[:3, 3] = color_translation
+            return color_matrix.tolist()
+
+        elif camera == _k4a.K4A_CALIBRATION_TYPE_DEPTH:
+            depth_rotation = np.array(list(self.depth_extrinsics.rotation)).reshape(3, 3)
+            depth_translation = np.array(list(self.depth_extrinsics.translation)) * 1e-3
+            depth_matrix = np.eye(4)
+            depth_matrix[:3, :3] = depth_rotation
+            depth_matrix[:3, 3] = depth_translation
+            return depth_matrix.tolist()
+
+    def get_distortion_parameters(self, camera: _k4a.k4a_calibration_type_t):
+        if camera == _k4a.K4A_CALIBRATION_TYPE_COLOR:
+            return {
                 "k": [self.color_params.k1, self.color_params.k2, self.color_params.k3, self.color_params.k4,
                       self.color_params.k5, self.color_params.k6],
                 "p": [self.color_params.p1, self.color_params.p2],
                 "codx": self.color_params.codx,
                 "cody": self.color_params.cody,
                 "metric_radius": self.color_params.metric_radius
-            },
-            "depth": {
+            }
+        elif camera == _k4a.K4A_CALIBRATION_TYPE_DEPTH:
+            return {
                 "k": [self.depth_params.k1, self.depth_params.k2, self.depth_params.k3, self.depth_params.k4,
-                      self.depth_params.k5, self.depth_params.k6],
+                    self.depth_params.k5, self.depth_params.k6],
                 "p": [self.depth_params.p1, self.depth_params.p2],
                 "codx": self.depth_params.codx,
                 "cody": self.depth_params.cody,
                 "metric_radius": self.depth_params.metric_radius
             }
-        }
 
     def is_valid(self):
         return self._handle
diff --git a/pykinect_azure/k4a/image.py b/pykinect_azure/k4a/image.py
index 6d9d13e..22fbf9f 100644
--- a/pykinect_azure/k4a/image.py
+++ b/pykinect_azure/k4a/image.py
@@ -37,10 +37,61 @@ def create(image_format,width_pixels,height_pixels,stride_bytes):
 
         return Image(handle)
 
+    @staticmethod
+    def create_custom16_from_numpy(arr: np.ndarray):
+        if arr.dtype != np.uint16:
+            arr = arr.astype(np.uint16)
+        assert len(arr.shape) == 2 or len(arr.shape) == 3
+        if len(arr.shape) == 3 and arr.shape[2] != 1:
+            arr = arr[:, :, 0:1]
+
+        height, width = arr.shape[:2]
+        handle = _k4a.k4a_image_t()
+        _k4a.VERIFY(_k4a.k4a_image_create_from_buffer(_k4a.K4A_IMAGE_FORMAT_CUSTOM16, width, height, width * 2, arr.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), arr.nbytes, None, None, handle), "Create image failed!")
+
+        return Image(handle)
+
+    @staticmethod
+    def create_custom16_from_shape(width: int, height: int):
+        arr = np.zeros((height, width, 1), dtype=np.uint16)
+        handle = _k4a.k4a_image_t()
+        _k4a.VERIFY(_k4a.k4a_image_create_from_buffer(_k4a.K4A_IMAGE_FORMAT_CUSTOM16, width, height, width * 2, arr.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), arr.nbytes, None, None, handle),
+                    "Create image failed!")
+
+        return Image(handle)
+
+    @staticmethod
+    def create_bgra32_from_numpy(arr: np.ndarray):
+        if arr.dtype != np.uint8:
+            arr = arr.astype(np.uint8)
+        assert len(arr.shape) == 2 or len(arr.shape) == 3
+        if len(arr.shape) == 3:
+            if arr.shape[2] > 4:
+                arr = arr[:, :, 0:4]
+            elif arr.shape[2] < 4:
+                _arr = np.zeros((arr.shape[0], arr.shape[1], 4), dtype=np.uint8)
+                _arr[:, :, 0:arr.shape[2]] = arr
+                arr = _arr
+        else:
+            arr = np.repeat(arr[:, :, np.newaxis], 4, axis=2)
+
+        height, width = arr.shape[:2]
+        handle = _k4a.k4a_image_t()
+        _k4a.VERIFY(_k4a.k4a_image_create_from_buffer(_k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32, width, height, width * 4, arr.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), arr.nbytes, None, None, handle), "Create image failed!")
+
+        return Image(handle)
+
+    @staticmethod
+    def create_bgra32_from_shape(width: int, height: int):
+        arr = np.zeros((height, width, 4), dtype=np.uint8)
+        handle = _k4a.k4a_image_t()
+        _k4a.VERIFY(_k4a.k4a_image_create_from_buffer(_k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32, width, height, width * 4, arr.ctypes.data_as(ctypes.POINTER(ctypes.c_ubyte)), arr.nbytes, None, None, handle), "Create image failed!")
+        return Image(handle)
+
     @property
     def width(self):
         return self.get_width_pixels()
-    
+
     @property
     def height(self):
         return self.get_height_pixels()
diff --git a/pykinect_azure/k4a/transformation.py b/pykinect_azure/k4a/transformation.py
index afcb5f2..1313b1f 100644
--- a/pykinect_azure/k4a/transformation.py
+++ b/pykinect_azure/k4a/transformation.py
@@ -95,6 +95,8 @@ def get_custom_bytes_per_pixel(self, custom_image):
 
         if custom_image_format == _k4a.K4A_IMAGE_FORMAT_CUSTOM8:
             return 1
+        elif custom_image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32:
+            return 4
         else:
             return 2
 