diff --git a/depthai-core b/depthai-core
index 57efb7183..cbedca719 160000
--- a/depthai-core
+++ b/depthai-core
@@ -1 +1 @@
-Subproject commit 57efb7183a57d7cf56180c9a9c99744a068048d7
+Subproject commit cbedca7193ef3b5e59b5c93cb748b51467ae28ce
diff --git a/docs/source/samples/depth_preview.rst b/docs/source/samples/depth_preview.rst
index 8e988b89d..359f17127 100644
--- a/docs/source/samples/depth_preview.rst
+++ b/docs/source/samples/depth_preview.rst
@@ -5,11 +5,17 @@ This example shows how to set the SGBM (semi-global-matching) disparity-depth no
 over XLink to transfer the results to the host real-time, and displays the depth map in OpenCV.
 Note that disparity is used in this case, as it colorizes in a more intuitive way.
 Below is also a preview of using different median filters side-by-side on a depth image.
+There are 3 depth modes which you can select inside the code:
+
+#. `lr_check`: used for better occlusion handling. For more information `click here `__
+#. `extended_disparity`: suitable for short-range objects. For more information `click here `__
+#. `subpixel`: suitable for long-range objects. For more information `click here `__
 
 .. rubric:: Similiar samples:
 
 - :ref:`RGB Preview`
 - :ref:`Mono Preview`
+- :ref:`Stereo Depth Video`
 
 Demo
 ####
diff --git a/docs/source/samples/object_tracker_video.rst b/docs/source/samples/object_tracker_video.rst
index 3cc4c0a11..703947a97 100644
--- a/docs/source/samples/object_tracker_video.rst
+++ b/docs/source/samples/object_tracker_video.rst
@@ -33,4 +33,12 @@ Source code
            :language: python
            :linenos:
 
+    .. tab:: C++
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../depthai-core/examples/src/object_tracker_video.cpp
+           :language: cpp
+           :linenos:
+
 .. include:: /includes/footer-short.rst
diff --git a/docs/source/samples/stereo_depth_from_host.rst b/docs/source/samples/stereo_depth_from_host.rst
index 3289cbc4b..88bdef556 100644
--- a/docs/source/samples/stereo_depth_from_host.rst
+++ b/docs/source/samples/stereo_depth_from_host.rst
@@ -1,10 +1,18 @@
 Stereo Depth from host
 ======================
 
-This example shows depth map from host using stereo images. There are 3 depth modes which you can select
-inside the code: left-right check, extended (for closer distance), subpixel (for longer distance).
+This example shows a depth map computed on the host from a set of stereo images. There are 3 depth modes which you can select inside the code:
+
+#. `lr_check`: used for better occlusion handling. For more information `click here `__
+#. `extended_disparity`: suitable for short-range objects. For more information `click here `__
+#. `subpixel`: suitable for long-range objects. For more information `click here `__
+
 Otherwise a median with kernel_7x7 is activated.
 
+.. rubric:: Similar samples:
+
+- :ref:`Stereo Depth Video`
+
 Setup
 #####
diff --git a/docs/source/samples/stereo_depth_video.rst b/docs/source/samples/stereo_depth_video.rst
new file mode 100644
index 000000000..a7298db47
--- /dev/null
+++ b/docs/source/samples/stereo_depth_video.rst
@@ -0,0 +1,46 @@
+Stereo Depth Video
+==================
+
+This example is an upgraded :ref:`Depth Preview`. It runs at a higher resolution (720p), and each frame can be shown
+(mono left-right, rectified left-right, disparity and depth). There are 6 modes which you can select
+inside the code:
+
+#. `withDepth`: if you turn it off it will become :ref:`Mono Preview`, so it will show only the 2 mono cameras
+#. `outputDepth`: if you turn it on it will show the depth
+#. `outputRectified`: if you turn it on it will show the rectified left and right frames
+#. `lrcheck`: used for better occlusion handling. For more information `click here `__
+#. `extended`: suitable for short-range objects. For more information `click here `__
+#. `subpixel`: suitable for long-range objects. For more information `click here `__
+
+.. rubric:: Similar samples:
+
+- :ref:`Depth Preview`
+- :ref:`Stereo Depth from host`
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Source code
+###########
+
+.. tabs::
+
+    .. tab:: Python
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../examples/stereo_depth_video.py
+           :language: python
+           :linenos:
+
+    .. tab:: C++
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../depthai-core/examples/src/stereo_depth_video.cpp
+           :language: cpp
+           :linenos:
+
+.. include:: /includes/footer-short.rst
diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst
index 524deb694..a472c2e2a 100644
--- a/docs/source/tutorials/code_samples.rst
+++ b/docs/source/tutorials/code_samples.rst
@@ -35,6 +35,7 @@ Code samples are used for automated testing. They are also a great starting poin
 - :ref:`Mono Camera Control` - Demonstrates how to control the mono camera (crop, exposure, sensitivity) from the host
 - :ref:`Depth Crop Control` - Demonstrates how to control cropping of depth frames from the host
 - :ref:`Stereo Depth from host` - Generates stereo depth frame from a set of mono images from the host
+- :ref:`Stereo Depth Video` - An extended version of **Depth Preview**
 - :ref:`RGB Rotate Warp` - Demonstrates how to rotate, mirror, flip or perform perspective transform on a frame
 - :ref:`RGB Depth` - Displays RGB depth frames
 - :ref:`Auto Exposure on ROI` - Demonstrates how to use auto exposure based on the selected ROI
diff --git a/docs/source/tutorials/complex_samples.rst b/docs/source/tutorials/complex_samples.rst
index 9c487b248..5bf3ad466 100644
--- a/docs/source/tutorials/complex_samples.rst
+++ b/docs/source/tutorials/complex_samples.rst
@@ -10,6 +10,7 @@ Complex
    ../samples/mono_camera_control.rst
    ../samples/depth_crop_control.rst
    ../samples/stereo_depth_from_host.rst
+   ../samples/stereo_depth_video.rst
    ../samples/rgb_rotate_warp.rst
    ../samples/rgb_depth_aligned.rst
    ../samples/autoexposure_roi.rst
@@ -34,6 +35,7 @@ If you want to see more interesting examples you should check out our `Experimen
 - :ref:`Mono Camera Control` - Demonstrates how to control the mono camera (crop, exposure, sensitivity) from the host
 - :ref:`Depth Crop Control` - Demonstrates how to control cropping of depth frames from the host
 - :ref:`Stereo Depth from host` - Generates stereo depth frame from a set of mono images from the host
+- :ref:`Stereo Depth Video` - An extended version of **Depth Preview**
 - :ref:`RGB Rotate Warp` - Demonstrates how to rotate, mirror, flip or perform perspective transform on a frame
 - :ref:`RGB Depth` - Displays RGB depth frames
 - :ref:`Auto Exposure on ROI` - Demonstrates how to use auto exposure based on the selected ROI
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index feacb3eb0..b1fc389cf 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -118,3 +118,4 @@ add_python_example(object_tracker object_tracker.py)
 add_python_example(spatial_object_tracker spatial_object_tracker.py)
 add_python_example(object_tracker_video object_tracker_video.py)
 add_python_example(stereo_depth_from_host stereo_depth_from_host.py)
+add_python_example(stereo_depth_video stereo_depth_video.py)
diff --git a/examples/object_tracker_video.py b/examples/object_tracker_video.py
index 6152dd512..f4d928faf 100755
--- a/examples/object_tracker_video.py
+++ b/examples/object_tracker_video.py
@@ -77,10 +77,10 @@ with dai.Device(pipeline) as device:
 
     qIn = device.getInputQueue(name="inFrame")
 
-    trackerFrameQ = device.getOutputQueue("trackerFrame", 4)
-    tracklets = device.getOutputQueue("tracklets", 4)
-    qManip = device.getOutputQueue("manip", maxSize=4)
-    qDet = device.getOutputQueue("nn", maxSize=4)
+    trackerFrameQ = device.getOutputQueue(name="trackerFrame", maxSize=4)
+    tracklets = device.getOutputQueue(name="tracklets", maxSize=4)
+    qManip = device.getOutputQueue(name="manip", maxSize=4)
+    qDet = device.getOutputQueue(name="nn", maxSize=4)
 
     startTime = time.monotonic()
     counter = 0
diff --git a/examples/stereo_depth_video.py b/examples/stereo_depth_video.py
new file mode 100755
index 000000000..76149db53
--- /dev/null
+++ b/examples/stereo_depth_video.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import numpy as np
+
+withDepth = True
+
+outputDepth = False
+outputRectified = True
+lrcheck = True
+extended = False
+subpixel = False
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Define sources and outputs
+monoLeft = pipeline.createMonoCamera()
+monoRight = pipeline.createMonoCamera()
+if withDepth:
+    stereo = pipeline.createStereoDepth()
+
+xoutLeft = pipeline.createXLinkOut()
+xoutRight = pipeline.createXLinkOut()
+xoutDisp = pipeline.createXLinkOut()
+xoutDepth = pipeline.createXLinkOut()
+xoutRectifL = pipeline.createXLinkOut()
+xoutRectifR = pipeline.createXLinkOut()
+
+# XLinkOut
+xoutLeft.setStreamName("left")
+xoutRight.setStreamName("right")
+if withDepth:
+    xoutDisp.setStreamName("disparity")
+    xoutDepth.setStreamName("depth")
+    xoutRectifL.setStreamName("rectified_left")
+    xoutRectifR.setStreamName("rectified_right")
+
+# Properties
+monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
+monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
+monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
+monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+
+if withDepth:
+    # StereoDepth
+    stereo.setConfidenceThreshold(200)
+    stereo.setRectifyEdgeFillColor(0)  # black, to better see the cutout
+    # stereo.setInputResolution(1280, 720)
+    stereo.setMedianFilter(dai.StereoDepthProperties.MedianFilter.MEDIAN_OFF)
+    stereo.setLeftRightCheck(lrcheck)
+    stereo.setExtendedDisparity(extended)
+    stereo.setSubpixel(subpixel)
+
+    # Linking
+    monoLeft.out.link(stereo.left)
+    monoRight.out.link(stereo.right)
+
+    stereo.syncedLeft.link(xoutLeft.input)
+    stereo.syncedRight.link(xoutRight.input)
+    stereo.disparity.link(xoutDisp.input)
+
+    if outputRectified:
+        stereo.rectifiedLeft.link(xoutRectifL.input)
+        stereo.rectifiedRight.link(xoutRectifR.input)
+
+    if outputDepth:
+        stereo.depth.link(xoutDepth.input)
+
+else:
+    # Link plugins CAM -> XLINK
+    monoLeft.out.link(xoutLeft.input)
+    monoRight.out.link(xoutRight.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+
+    leftQueue = device.getOutputQueue(name="left", maxSize=8, blocking=False)
+    rightQueue = device.getOutputQueue(name="right", maxSize=8, blocking=False)
+    if withDepth:
+        dispQueue = device.getOutputQueue(name="disparity", maxSize=8, blocking=False)
+        depthQueue = device.getOutputQueue(name="depth", maxSize=8, blocking=False)
+        rectifLeftQueue = device.getOutputQueue(name="rectified_left", maxSize=8, blocking=False)
+        rectifRightQueue = device.getOutputQueue(name="rectified_right", maxSize=8, blocking=False)
+
+        # Disparity range is used for normalization
+        disparityMultiplier = 255 / stereo.getMaxDisparity()
+
+    while True:
+        left = leftQueue.get()
+        cv2.imshow("left", left.getFrame())
+        right = rightQueue.get()
+        cv2.imshow("right", right.getFrame())
+
+        if withDepth:
+            disparity = dispQueue.get()
+            disp = disparity.getCvFrame()
+            disp = (disp*disparityMultiplier).astype(np.uint8)  # Extended disparity range
+            cv2.imshow("disparity", disp)
+            disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
+            cv2.imshow("disparity_color", disp)
+
+            if outputDepth:
+                depth = depthQueue.get()
+                cv2.imshow("depth", depth.getCvFrame().astype(np.uint16))
+
+            if outputRectified:
+                rectifL = rectifLeftQueue.get()
+                cv2.imshow("rectified_left", rectifL.getFrame())
+
+                rectifR = rectifRightQueue.get()
+                cv2.imshow("rectified_right", rectifR.getFrame())
+
+        if cv2.waitKey(1) == ord('q'):
+            break
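
A note on the normalization in `stereo_depth_video.py` above: `disparityMultiplier = 255 / stereo.getMaxDisparity()` rescales the device's disparity range to the 8-bit range that `cv2.applyColorMap` expects, and the maximum disparity grows when `extended` or `subpixel` is enabled, which is why it is queried at runtime rather than hardcoded. Below is a minimal host-only sketch of that rescaling using a synthetic frame in place of `dispQueue.get().getCvFrame()`; the 0..95 default range is an assumption about typical StereoDepth behavior, and on a real device you would use `stereo.getMaxDisparity()` instead::

    import cv2
    import numpy as np

    # Assumed default disparity range (0..95). Extended disparity and
    # subpixel mode enlarge this range, so the example queries
    # stereo.getMaxDisparity() at runtime instead of hardcoding it.
    maxDisparity = 95

    # Synthetic disparity frame standing in for dispQueue.get().getCvFrame()
    disp = np.random.randint(0, maxDisparity + 1, (720, 1280)).astype(np.uint8)

    # Same rescaling as in stereo_depth_video.py: map 0..maxDisparity to 0..255
    disparityMultiplier = 255 / maxDisparity
    disp = (disp * disparityMultiplier).astype(np.uint8)

    # Colorize for a more intuitive view, as the example does
    dispColor = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
    cv2.imshow("disparity_color", dispColor)
    cv2.waitKey(0)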