From cd07f49a246962f18fc2375c20313f1c3c61194b Mon Sep 17 00:00:00 2001
From: Erol444
Date: Thu, 13 Apr 2023 18:18:59 +0200
Subject: [PATCH 1/2] Started updating depth colorization logic

---
 .../SpatialDetection/spatial_calculator_multi_roi.py | 9 ++++++---
 examples/mixed/rotated_spatial_detections.py         | 9 ++++++---
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/examples/SpatialDetection/spatial_calculator_multi_roi.py b/examples/SpatialDetection/spatial_calculator_multi_roi.py
index dbf7c6fce..4752f5bfd 100755
--- a/examples/SpatialDetection/spatial_calculator_multi_roi.py
+++ b/examples/SpatialDetection/spatial_calculator_multi_roi.py
@@ -3,6 +3,7 @@
 import cv2
 import depthai as dai
 import math
+import numpy as np

 # Create pipeline
 pipeline = dai.Pipeline()
@@ -29,7 +30,7 @@

 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 stereo.setLeftRightCheck(True)
-stereo.setExtendedDisparity(True)
+stereo.setSubpixel(True)
 spatialLocationCalculator.inputConfig.setWaitForMessage(False)

 # Create 10 ROIs
@@ -65,8 +66,10 @@

         depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         spatialData = spatialCalcQueue.get().getSpatialLocations()
diff --git a/examples/mixed/rotated_spatial_detections.py b/examples/mixed/rotated_spatial_detections.py
index 3b386a48b..b3ad05d80 100755
--- a/examples/mixed/rotated_spatial_detections.py
+++ b/examples/mixed/rotated_spatial_detections.py
@@ -4,7 +4,7 @@
 import sys
 import cv2
 import depthai as dai
-
+import numpy as np
 '''
 Spatial object detections demo for 180° rotated OAK camera.
 '''
@@ -58,6 +58,7 @@
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
+stereo.setSubpixel(True)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

 rotate_stereo_manip = pipeline.createImageManip()
@@ -104,8 +105,10 @@
         frame = inPreview.getCvFrame()
         depthFrame = depth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         detections = inDet.detections

From b0fa0fe3b3723311b0fa40250be918b9d479269a Mon Sep 17 00:00:00 2001
From: Erol444
Date: Thu, 13 Apr 2023 18:29:47 +0200
Subject: [PATCH 2/2] Updated other examples as well that used old colorization logic

---
 .../spatial_calculator_multi_roi.rst         |  2 +-
 docs/source/tutorials/code_samples.rst       |  1 +
 .../spatial_location_calculator.py           | 23 +++++++++----------
 .../SpatialDetection/spatial_mobilenet.py    |  9 +++++---
 .../spatial_mobilenet_mono.py                |  7 ++++--
 .../SpatialDetection/spatial_tiny_yolo.py    |  9 +++++---
 examples/StereoDepth/depth_crop_control.py   |  7 ++++--
 7 files changed, 35 insertions(+), 23 deletions(-)

diff --git a/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst b/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst
index 10392c5e6..585a5630f 100644
--- a/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst
+++ b/docs/source/samples/SpatialDetection/spatial_calculator_multi_roi.rst
@@ -11,7 +11,7 @@ scanning camera for mobile robots.
 Demo
 ####

-.. image:: https://user-images.githubusercontent.com/18037362/190861621-b57fd1e3-5a3d-4d79-b1a7-d17a0b78c63e.gif
+.. image:: https://user-images.githubusercontent.com/18037362/231822498-6e3699a0-039e-424b-acb2-b246575e91ee.png

 Setup
 #####
diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst
index 46127c230..a7b72aefa 100644
--- a/docs/source/tutorials/code_samples.rst
+++ b/docs/source/tutorials/code_samples.rst
@@ -125,6 +125,7 @@ are presented with code.

 .. rubric:: SpatialDetection

+- :ref:`Spatial Calculator Multi-ROI` - Selects multiple ROIs and calculates spatial coordinates for each of them
 - :ref:`Spatial location calculator` - Demonstrates how to use the spatial location calculator
 - :ref:`RGB & MobilenetSSD with spatial data` - Displays RGB frames with MobileNet detections and spatial coordinates on them
 - :ref:`Mono & MobilenetSSD with spatial data` - Displays mono frames with MobileNet detections and spatial coordinates on them
diff --git a/examples/SpatialDetection/spatial_location_calculator.py b/examples/SpatialDetection/spatial_location_calculator.py
index 8e30f0a3c..4f2779dfd 100755
--- a/examples/SpatialDetection/spatial_location_calculator.py
+++ b/examples/SpatialDetection/spatial_location_calculator.py
@@ -2,7 +2,7 @@

 import cv2
 import depthai as dai
-
+import numpy as np
 stepSize = 0.05

 newConfig = False
@@ -30,12 +30,9 @@
 monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
 monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

-lrcheck = False
-subpixel = False
-
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
-stereo.setLeftRightCheck(lrcheck)
-stereo.setSubpixel(subpixel)
+stereo.setLeftRightCheck(True)
+stereo.setSubpixel(True)

 # Config
 topLeft = dai.Point2f(0.4, 0.4)
@@ -77,8 +74,10 @@

         depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         spatialData = spatialCalcQueue.get().getSpatialLocations()
@@ -94,10 +93,10 @@
             depthMax = depthData.depthMax

             fontType = cv2.FONT_HERSHEY_TRIPLEX
-            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
-            cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, 255)
-            cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, 255)
-            cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, 255)
+            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
+            cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color)
+            cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color)
+            cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color)

         # Show the frame
         cv2.imshow("depth", depthFrameColor)
diff --git a/examples/SpatialDetection/spatial_mobilenet.py b/examples/SpatialDetection/spatial_mobilenet.py
index ec2eff715..b67757b44 100755
--- a/examples/SpatialDetection/spatial_mobilenet.py
+++ b/examples/SpatialDetection/spatial_mobilenet.py
@@ -60,6 +60,7 @@
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
+stereo.setSubpixel(True)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

 spatialDetectionNetwork.setBlobPath(nnBlobPath)
@@ -113,8 +114,10 @@

         depthFrame = depth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         detections = inDet.detections
@@ -132,7 +135,7 @@
             ymin = int(topLeft.y)
             xmax = int(bottomRight.x)
             ymax = int(bottomRight.y)
-            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
+            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)

             # Denormalize bounding box
             x1 = int(detection.xmin * width)
diff --git a/examples/SpatialDetection/spatial_mobilenet_mono.py b/examples/SpatialDetection/spatial_mobilenet_mono.py
index 30a237d2f..f6c265071 100755
--- a/examples/SpatialDetection/spatial_mobilenet_mono.py
+++ b/examples/SpatialDetection/spatial_mobilenet_mono.py
@@ -59,6 +59,7 @@

 # StereoDepth
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+stereo.setSubpixel(True)

 # Define a neural network that will make predictions based on the source frames
 spatialDetectionNetwork.setConfidenceThreshold(0.5)
@@ -116,8 +117,10 @@

         depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         detections = inDet.detections
diff --git a/examples/SpatialDetection/spatial_tiny_yolo.py b/examples/SpatialDetection/spatial_tiny_yolo.py
index 5575bccd6..0158f2b9e 100755
--- a/examples/SpatialDetection/spatial_tiny_yolo.py
+++ b/examples/SpatialDetection/spatial_tiny_yolo.py
@@ -84,6 +84,7 @@
 # Align depth map to the perspective of RGB camera, on which inference is done
 stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
 stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
+stereo.setSubpixel(True)

 spatialDetectionNetwork.setBlobPath(nnBlobPath)
 spatialDetectionNetwork.setConfidenceThreshold(0.5)
@@ -146,8 +147,10 @@
         frame = inPreview.getCvFrame()
         depthFrame = depth.getFrame() # depthFrame values are in millimeters

-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         counter+=1
@@ -172,7 +175,7 @@
             ymin = int(topLeft.y)
             xmax = int(bottomRight.x)
             ymax = int(bottomRight.y)
-            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
+            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)

             # Denormalize bounding box
             x1 = int(detection.xmin * width)
diff --git a/examples/StereoDepth/depth_crop_control.py b/examples/StereoDepth/depth_crop_control.py
index eae2677a4..a9a2c7a69 100755
--- a/examples/StereoDepth/depth_crop_control.py
+++ b/examples/StereoDepth/depth_crop_control.py
@@ -40,6 +40,7 @@
 manip.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
 manip.setMaxOutputFrameSize(monoRight.getResolutionHeight()*monoRight.getResolutionWidth()*3)
 stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+stereo.setSubpixel(True)

 # Linking
 configIn.out.link(manip.inputConfig)
@@ -62,8 +63,10 @@
         depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

         # Frame is transformed, the color map will be applied to highlight the depth info
-        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
-        depthFrameColor = cv2.equalizeHist(depthFrameColor)
+        depth_downscaled = depthFrame[::4]
+        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
+        max_depth = np.percentile(depth_downscaled, 99)
+        depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
         depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

         # Frame is ready to be shown
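
Note on the colorization logic repeated throughout these patches: the percentile-based normalization can be pulled out into a small helper for reuse. Below is a minimal sketch, not part of the patches themselves; the helper name colorize_depth is ours (not a DepthAI API), depth_frame is assumed to be the uint16 depth map in millimeters returned by getFrame(), and the all-zero guard is an extra safety check that the patches do not include.

import cv2
import numpy as np

def colorize_depth(depth_frame):
    # Subsample rows so the percentile computation stays cheap
    depth_downscaled = depth_frame[::4]
    valid = depth_downscaled[depth_downscaled != 0]
    if valid.size == 0:
        # No valid (non-zero) depth in this frame; fall back to a dummy range
        min_depth, max_depth = 0, 1
    else:
        # Clip outliers: 1st percentile of valid pixels as the near end,
        # 99th percentile of the subsampled frame as the far end
        min_depth = np.percentile(valid, 1)
        max_depth = np.percentile(depth_downscaled, 99)
    # Map [min_depth, max_depth] to [0, 255]; np.interp clips values outside the range
    frame_u8 = np.interp(depth_frame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
    # Apply the same colormap the examples use
    return cv2.applyColorMap(frame_u8, cv2.COLORMAP_HOT)

In the examples this would stand in for the four depthFrameColor lines, e.g. depthFrameColor = colorize_depth(depthFrame). Compared with the old cv2.normalize + cv2.equalizeHist approach, fixing the display range to the 1st-99th percentiles keeps invalid (zero) pixels and far outliers from washing out the rest of the depth map.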