diff --git a/depthai-core b/depthai-core index cbedca719..45e011f13 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit cbedca7193ef3b5e59b5c93cb748b51467ae28ce +Subproject commit 45e011f13f3dcb1f2a7e1f01fabbd0c771379308 diff --git a/docs/source/samples/queue_add_callback.rst b/docs/source/samples/queue_add_callback.rst new file mode 100644 index 000000000..8f22c21ad --- /dev/null +++ b/docs/source/samples/queue_add_callback.rst @@ -0,0 +1,39 @@ +Queue add callback +================== + +This example shows how to use queue callbacks. It sends both mono frames and color frames from the device to the +host via one :code:`XLinkOut` node. In the callback function :code:`newFrame()` we decode which camera +the frame came from, so we can later show the frame with the correct title to the user. + +Demo +#### + +.. image:: https://user-images.githubusercontent.com/18037362/120119546-309d5200-c190-11eb-932a-8235be7a4aa1.gif + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../examples/queue_add_callback.py + :language: python + :linenos: + + .. tab:: C++ + + Also `available on GitHub `__ + + .. literalinclude:: ../../../depthai-core/examples/src/queue_add_callback.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst index a472c2e2a..8d17a1b21 100644 --- a/docs/source/tutorials/code_samples.rst +++ b/docs/source/tutorials/code_samples.rst @@ -18,7 +18,6 @@ Code samples are used for automated testing. 
They are also a great starting poin - :ref:`RGB video` - Displays high resolution frames of the RGB camera - :ref:`Mono Preview` - Displays right/left mono cameras - :ref:`Depth Preview` - Displays colorized stereo disparity -- :ref:`Device Queue Event` - Demonstrates how to use device queue events - :ref:`RGB Encoding` - Encodes RGB (1080P, 30FPS) into :code:`.h265` and saves it on the host - :ref:`RGB & Mono Encoding`- Encodes RGB (1080P, 30FPS) and both mono streams (720P, 30FPS) into :code:`.h265`/:code:`.h264` and saves them on the host - :ref:`Encoding Max Limit` - Encodes RGB (4k 25FPS) and both mono streams (720P, 25FPS) into :code:`.h265`/:code:`.h264` and saves them on the host @@ -56,4 +55,6 @@ Code samples are used for automated testing. They are also a great starting poin .. rubric:: Mixed - :ref:`System information` - Displays device system information (memory/cpu usage, temperature) -- :ref:`OpenCV support` - Demonstrates how to retrieve an image frame as an OpenCV frame \ No newline at end of file +- :ref:`OpenCV support` - Demonstrates how to retrieve an image frame as an OpenCV frame +- :ref:`Device Queue Event` - Demonstrates how to use device queue events +- :ref:`Queue add callback` - Demonstrates how to use queue callbacks \ No newline at end of file diff --git a/docs/source/tutorials/mixed_samples.rst b/docs/source/tutorials/mixed_samples.rst index 2cdd51b9e..5168fb9ed 100644 --- a/docs/source/tutorials/mixed_samples.rst +++ b/docs/source/tutorials/mixed_samples.rst @@ -8,6 +8,10 @@ Mixed ../samples/system_information.rst ../samples/opencv_support.rst + ../samples/device_queue_event.rst + ../samples/queue_add_callback.rst - :ref:`System information` - Displays device system information (memory/cpu usage, temperature) -- :ref:`OpenCV support` - Demonstrates how to retrieve an image frame as an OpenCV frame \ No newline at end of file +- :ref:`OpenCV support` - Demonstrates how to retrieve an image frame as an OpenCV frame +- :ref:`Device Queue 
Event` - Demonstrates how to use device queue events +- :ref:`Queue add callback` - Demonstrates how to use queue callbacks \ No newline at end of file diff --git a/docs/source/tutorials/simple_samples.rst b/docs/source/tutorials/simple_samples.rst index 9d2cc7c12..3ed78c076 100644 --- a/docs/source/tutorials/simple_samples.rst +++ b/docs/source/tutorials/simple_samples.rst @@ -10,7 +10,6 @@ Simple ../samples/rgb_video.rst ../samples/mono_preview.rst ../samples/depth_preview.rst - ../samples/device_queue_event.rst ../samples/rgb_encoding.rst ../samples/rgb_mono_encoding.rst ../samples/encoding_max_limit.rst @@ -27,7 +26,6 @@ These samples are great starting point for the gen2 API. - :ref:`RGB video` - Displays high resolution frames of the RGB camera - :ref:`Mono Preview` - Displays right/left mono cameras - :ref:`Depth Preview` - Displays colorized stereo disparity -- :ref:`Device Queue Event` - Demonstrates how to use device queue events - :ref:`RGB Encoding` - Encodes RGB (1080P, 30FPS) into :code:`.h265` and saves it on the host - :ref:`RGB & Mono Encoding`- Encodes RGB (1080P, 30FPS) and both mono streams (720P, 30FPS) into :code:`.h265`/:code:`.h264` and saves them on the host - :ref:`Encoding Max Limit` - Encodes RGB (4k 25FPS) and both mono streams (720P, 25FPS) into :code:`.h265`/:code:`.h264` and saves them on the host diff --git a/examples/queue_add_callback.py b/examples/queue_add_callback.py new file mode 100755 index 000000000..949f9e610 --- /dev/null +++ b/examples/queue_add_callback.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +import cv2 +import depthai as dai +import queue + +# Create pipeline +pipeline = dai.Pipeline() + +# Add all three cameras +camRgb = pipeline.createColorCamera() +left = pipeline.createMonoCamera() +right = pipeline.createMonoCamera() + +# Create XLink output +xout = pipeline.createXLinkOut() +xout.setStreamName("frames") + +# Properties +camRgb.setPreviewSize(300, 300) +left.setBoardSocket(dai.CameraBoardSocket.LEFT) 
+left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) +right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) + +# Stream all the camera streams through the same XLink node +camRgb.preview.link(xout.input) +left.out.link(xout.input) +right.out.link(xout.input) + +q = queue.Queue() + +def newFrame(inFrame): + global q + # Get "stream name" from the instance number + num = inFrame.getInstanceNum() + name = "color" if num == 0 else "left" if num == 1 else "right" + frame = inFrame.getCvFrame() + # This is a different thread and you could use it to + # run image processing algorithms here + q.put({"name": name, "frame": frame}) + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + + # Add callback to the output queue "frames" for all newly arrived frames (color, left, right) + device.getOutputQueue(name="frames", maxSize=4, blocking=False).addCallback(newFrame) + + while True: + # You could also get the data as non-blocking (block=False) + data = q.get(block=True) + cv2.imshow(data["name"], data["frame"]) + + if cv2.waitKey(1) == ord('q'): + break