diff --git a/docs/source/components/nodes/image_manip.rst b/docs/source/components/nodes/image_manip.rst
index ea2c9ed48..d7d897992 100644
--- a/docs/source/components/nodes/image_manip.rst
+++ b/docs/source/components/nodes/image_manip.rst
@@ -87,6 +87,8 @@ Examples of functionality
 - :ref:`Mono & MobilenetSSD`
 - :ref:`RGB Encoding & Mono & MobilenetSSD`
 - :ref:`RGB Camera Control`
+- :ref:`ImageManip tiling` - Using ImageManip for frame tiling
+- :ref:`ImageManip rotate` - Using ImageManip to rotate color/mono frames
 
 Reference
 #########
diff --git a/docs/source/samples/image_manip_rotate.rst b/docs/source/samples/image_manip_rotate.rst
new file mode 100644
index 000000000..adccbf01f
--- /dev/null
+++ b/docs/source/samples/image_manip_rotate.rst
@@ -0,0 +1,55 @@
+ImageManip Rotate
+=================
+
+This example showcases how to rotate color and mono frames with the help of the :ref:`ImageManip` node.
+In this example, we rotate the frames by 90°.
+
+.. note::
+  Due to a HW warp constraint, the width of the input image (to be rotated) has to be a **multiple of 16**.
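+
+If the source width isn't already a multiple of 16, one option is to pick a preview size that satisfies the constraint.
+A minimal sketch, where ``align_width_16`` is a hypothetical helper (not part of the DepthAI API) and ``camRgb`` is the ColorCamera node from the example below:
+
+.. code-block:: python
+
+  def align_width_16(width: int) -> int:
+      # Round down to the nearest multiple of 16 (HW warp requirement)
+      return width - (width % 16)
+
+  camRgb.setPreviewSize(align_width_16(642), 400)  # 642 -> 640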
+
+Demos
+#####
+
+.. image:: https://user-images.githubusercontent.com/18037362/128074634-d2baa78e-8f35-40fc-8661-321f3a3c3850.png
+  :alt: Rotated mono and color frames
+
+Here I have the DepthAI device positioned vertically on my desk.
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Source code
+###########
+
+.. tabs::
+
+  .. tab:: Python
+
+    Also `available on GitHub `__
+
+    .. literalinclude:: ../../../examples/image_manip_rotate.py
+      :language: python
+      :linenos:
+
+  .. tab:: C++
+
+    Also `available on GitHub `__
+
+    .. literalinclude:: ../../../depthai-core/examples/src/image_manip_rotate.cpp
+      :language: cpp
+      :linenos:
+
+.. include:: /includes/footer-short.rst
diff --git a/docs/source/samples/image_manip_tiling.rst b/docs/source/samples/image_manip_tiling.rst
new file mode 100644
index 000000000..2b1efc75c
--- /dev/null
+++ b/docs/source/samples/image_manip_tiling.rst
@@ -0,0 +1,50 @@
+ImageManip Tiling
+=================
+
+Frame tiling can be useful when, for example, feeding a large frame into a :ref:`NeuralNetwork` whose input size is smaller. In such cases,
+you can tile the large frame into multiple smaller ones and feed each of them to the :ref:`NeuralNetwork`.
+
+In this example we use two :ref:`ImageManip` nodes to split the original :code:`1000x500` preview frame into two :code:`500x500` frames.
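+
+The same approach generalizes to more tiles. Here is a minimal sketch (not part of this example) that computes
+the normalized crop rectangles for a hypothetical ``n``-column split, each usable with ``setCropRect`` of its own :ref:`ImageManip`:
+
+.. code-block:: python
+
+  n = 4  # number of vertical strips
+  # setCropRect takes normalized (xmin, ymin, xmax, ymax) coordinates
+  rects = [(i / n, 0, (i + 1) / n, 1) for i in range(n)]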
+
+Demo
+####
+
+.. image:: https://user-images.githubusercontent.com/18037362/128074673-045ed4b6-ac8c-4a76-83bb-0f3dc996f7a5.png
+  :alt: Tiling preview into 2 frames/tiles
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Source code
+###########
+
+.. tabs::
+
+  .. tab:: Python
+
+    Also `available on GitHub `__
+
+    .. literalinclude:: ../../../examples/image_manip_tiling.py
+      :language: python
+      :linenos:
+
+  .. tab:: C++
+
+    Also `available on GitHub `__
+
+    .. literalinclude:: ../../../depthai-core/examples/src/image_manip_tiling.cpp
+      :language: cpp
+      :linenos:
+
+.. include:: /includes/footer-short.rst
diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst
index 1488c7693..70db78a84 100644
--- a/docs/source/tutorials/code_samples.rst
+++ b/docs/source/tutorials/code_samples.rst
@@ -32,6 +32,8 @@ Code samples are used for automated testing. They are also a great starting point
 - :ref:`Edge detector` - Edge detection on input frame
 - :ref:`Script camera control` - Controlling the camera with the Script node
 - :ref:`Bootloader version` - Retrieves Version of Bootloader on the device
+- :ref:`ImageManip tiling` - Using ImageManip for frame tiling
+- :ref:`ImageManip rotate` - Using ImageManip to rotate color/mono frames
 
 .. rubric:: Complex
 
diff --git a/docs/source/tutorials/simple_samples.rst b/docs/source/tutorials/simple_samples.rst
index 9e86efb33..7035f61dc 100644
--- a/docs/source/tutorials/simple_samples.rst
+++ b/docs/source/tutorials/simple_samples.rst
@@ -24,6 +24,8 @@ Simple
    ../samples/edge_detector.rst
    ../samples/script_camera_control.rst
    ../samples/bootloader_version.rst
+   ../samples/image_manip_tiling.rst
+   ../samples/image_manip_rotate.rst
 
 These samples are great starting point for the gen2 API.
 
@@ -41,4 +43,7 @@ These samples are great starting point for the gen2 API.
 - :ref:`Mono & MobilenetSSD` - Runs MobileNetSSD on mono frames and displays detections on the frame
 - :ref:`Video & MobilenetSSD` - Runs MobileNetSSD on the video from the host
 - :ref:`Edge detector` - Edge detection on input frame
+- :ref:`Script camera control` - Controlling the camera with the Script node
 - :ref:`Bootloader Version` - Retrieves Version of Bootloader on the device
+- :ref:`ImageManip Tiling` - Using ImageManip for frame tiling
+- :ref:`ImageManip Rotate` - Using ImageManip to rotate color/mono frames
diff --git a/examples/image_manip_rotate.py b/examples/image_manip_rotate.py
new file mode 100644
index 000000000..2e5c15c7b
--- /dev/null
+++ b/examples/image_manip_rotate.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Rotate color frames
+camRgb = pipeline.createColorCamera()
+camRgb.setPreviewSize(640, 400)
+camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
+camRgb.setInterleaved(False)
+
+manipRgb = pipeline.createImageManip()
+rgbRr = dai.RotatedRect()
+rgbRr.center.x, rgbRr.center.y = camRgb.getPreviewWidth() // 2, camRgb.getPreviewHeight() // 2
+rgbRr.size.width, rgbRr.size.height = camRgb.getPreviewHeight(), camRgb.getPreviewWidth()  # swapped for the 90° rotation
+rgbRr.angle = 90
+manipRgb.initialConfig.setCropRotatedRect(rgbRr, False)  # False = rect is in pixel (not normalized) coordinates
+camRgb.preview.link(manipRgb.inputImage)
+
+manipRgbOut = pipeline.createXLinkOut()
+manipRgbOut.setStreamName("manip_rgb")
+manipRgb.out.link(manipRgbOut.input)
+
+# Rotate mono frames
+monoLeft = pipeline.createMonoCamera()
+monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
+
+manipLeft = pipeline.createImageManip()
+rr = dai.RotatedRect()
+rr.center.x, rr.center.y = monoLeft.getResolutionWidth() // 2, monoLeft.getResolutionHeight() // 2
+rr.size.width, rr.size.height = monoLeft.getResolutionHeight(), monoLeft.getResolutionWidth()  # swapped for the 90° rotation
+rr.angle = 90
+manipLeft.initialConfig.setCropRotatedRect(rr, False)
+monoLeft.out.link(manipLeft.inputImage)
+
+manipLeftOut = pipeline.createXLinkOut()
+manipLeftOut.setStreamName("manip_left")
+manipLeft.out.link(manipLeftOut.input)
+
+with dai.Device(pipeline) as device:
+    qLeft = device.getOutputQueue(name="manip_left", maxSize=8, blocking=False)
+    qRgb = device.getOutputQueue(name="manip_rgb", maxSize=8, blocking=False)
+
+    while True:
+        inLeft = qLeft.tryGet()
+        if inLeft is not None:
+            cv2.imshow('Left rotated', inLeft.getCvFrame())
+
+        inRgb = qRgb.tryGet()
+        if inRgb is not None:
+            cv2.imshow('Color rotated', inRgb.getCvFrame())
+
+        if cv2.waitKey(1) == ord('q'):
+            break
\ No newline at end of file
diff --git a/examples/image_manip_tiling.py b/examples/image_manip_tiling.py
new file mode 100644
index 000000000..441dbb53d
--- /dev/null
+++ b/examples/image_manip_tiling.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+camRgb = pipeline.createColorCamera()
+camRgb.setPreviewSize(1000, 500)
+camRgb.setInterleaved(False)
+maxFrameSize = (camRgb.getPreviewWidth() // 2) * camRgb.getPreviewHeight() * 3  # bytes in each 500x500 BGR tile
+
+# In this example we use two ImageManip nodes to split the original 1000x500
+# preview frame into two 500x500 frames
+manip1 = pipeline.createImageManip()
+manip1.initialConfig.setCropRect(0, 0, 0.5, 1)
+manip1.setMaxOutputFrameSize(maxFrameSize)
+camRgb.preview.link(manip1.inputImage)
+
+manip2 = pipeline.createImageManip()
+manip2.initialConfig.setCropRect(0.5, 0, 1, 1)
+manip2.setMaxOutputFrameSize(maxFrameSize)
+camRgb.preview.link(manip2.inputImage)
+
+xout1 = pipeline.createXLinkOut()
+xout1.setStreamName('out1')
+manip1.out.link(xout1.input)
+
+xout2 = pipeline.createXLinkOut()
+xout2.setStreamName('out2')
+manip2.out.link(xout2.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    # Output queues will be used to get the frames from the outputs defined above
+    q1 = device.getOutputQueue(name="out1", maxSize=4, blocking=False)
+    q2 = device.getOutputQueue(name="out2", maxSize=4, blocking=False)
+
+    while True:
+        in1 = q1.tryGet()
+        if in1 is not None:
+            cv2.imshow("Tile 1", in1.getCvFrame())
+
+        in2 = q2.tryGet()
+        if in2 is not None:
+            cv2.imshow("Tile 2", in2.getCvFrame())
+
+        if cv2.waitKey(1) == ord('q'):
+            break