From 23c449e2a9f7972f78927713a28f5b9cb74ca48b Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 15:52:13 +0200 Subject: [PATCH 1/3] fixed minor typos, comments, docs --- docs/source/samples/04_rgb_encoding.rst | 2 +- docs/source/samples/05_rgb_mono_encoding.rst | 2 +- .../samples/06_rgb_full_resolution_saver.rst | 2 +- docs/source/samples/09_mono_mobilenet.rst | 2 +- .../samples/10_mono_depth_mobilenetssd.rst | 2 +- .../11_rgb_encoding_mono_mobilenet.rst | 6 ++-- .../12_rgb_encoding_mono_mobilenet_depth.rst | 8 ++--- docs/source/samples/13_encoding_max_limit.rst | 4 +-- docs/source/samples/15_rgb_mobilenet_4k.rst | 6 ++-- docs/source/samples/17_video_mobilenet.rst | 4 +-- .../samples/18_rgb_encoding_mobilenet.rst | 4 +-- .../21_mobilenet_decoding_on_device.rst | 2 +- .../22_1_tiny_yolo_v3_decoding_on_device.rst | 2 +- docs/source/samples/24_opencv_support.rst | 2 +- docs/source/samples/25_system_information.rst | 2 +- .../source/samples/26_1_spatial_mobilenet.rst | 2 +- .../samples/26_2_spatial_mobilenet_mono.rst | 4 +-- .../source/samples/26_3_spatial_tiny_yolo.rst | 4 +-- .../27_spatial_location_calculator.rst | 2 +- examples/01_rgb_preview.py | 2 +- examples/02_mono_preview.py | 4 +-- examples/03_depth_preview.py | 29 ++--------------- examples/04_rgb_encoding.py | 6 ++-- examples/05_rgb_mono_encoding.py | 2 +- examples/06_rgb_full_resolution_saver.py | 4 +-- examples/07_mono_full_resolution_saver.py | 12 +++---- examples/08_rgb_mobilenet.py | 8 ++--- examples/09_mono_mobilenet.py | 4 +-- examples/10_mono_depth_mobilenetssd.py | 32 +++---------------- examples/11_rgb_encoding_mono_mobilenet.py | 2 +- .../12_rgb_encoding_mono_mobilenet_depth.py | 2 +- examples/13_encoding_max_limit.py | 4 +-- examples/14_color_camera_control.py | 2 +- examples/15_rgb_mobilenet_4k.py | 6 ++-- examples/16_device_queue_event.py | 10 +++--- examples/17_video_mobilenet.py | 16 +++++----- examples/19_mono_camera_control.py | 2 +- examples/23_autoexposure_roi.py | 2 +- 38 files changed, 83 insertions(+), 128 deletions(-) diff --git a/docs/source/samples/04_rgb_encoding.rst b/docs/source/samples/04_rgb_encoding.rst index ed61d54aa..8a4e0f451 100644 --- a/docs/source/samples/04_rgb_encoding.rst +++ b/docs/source/samples/04_rgb_encoding.rst @@ -9,7 +9,7 @@ encoded video over XLINK to the host, saving it to disk as a video file. Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. diff --git a/docs/source/samples/05_rgb_mono_encoding.rst b/docs/source/samples/05_rgb_mono_encoding.rst index 96b842947..2ca6a3664 100644 --- a/docs/source/samples/05_rgb_mono_encoding.rst +++ b/docs/source/samples/05_rgb_mono_encoding.rst @@ -8,7 +8,7 @@ each, all at 30FPS. Each encoded video stream is transferred over XLINK and save Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. 
So if you leave it running, you could fill up your storage on your host. Demo diff --git a/docs/source/samples/06_rgb_full_resolution_saver.rst b/docs/source/samples/06_rgb_full_resolution_saver.rst index be2e54d36..76fc66a41 100644 --- a/docs/source/samples/06_rgb_full_resolution_saver.rst +++ b/docs/source/samples/06_rgb_full_resolution_saver.rst @@ -8,7 +8,7 @@ uncompressed frames, processed by ISP, and raw - BayerRG (R_Gr_Gb_B), as read fr 10-bit packed. See here for the pull request on this capability. Be careful, this example saves full resolution .png pictures to your host storage. So if you leave -them running, you could fill up your storage on your host. +it running, you could fill up your storage on your host. Demo #### diff --git a/docs/source/samples/09_mono_mobilenet.rst b/docs/source/samples/09_mono_mobilenet.rst index 9cb99f9c5..8e6d367d7 100644 --- a/docs/source/samples/09_mono_mobilenet.rst +++ b/docs/source/samples/09_mono_mobilenet.rst @@ -19,7 +19,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ diff --git a/docs/source/samples/10_mono_depth_mobilenetssd.rst b/docs/source/samples/10_mono_depth_mobilenetssd.rst index da36998b5..aac1c1bba 100644 --- a/docs/source/samples/10_mono_depth_mobilenetssd.rst +++ b/docs/source/samples/10_mono_depth_mobilenetssd.rst @@ -19,7 +19,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst b/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst index 925c15386..a870d4145 100644 --- a/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst +++ b/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst @@ -3,13 +3,13 @@ This example shows how to configure the depthai video encoder in h.265 format to encode the RGB camera input at Full-HD resolution at 30FPS, and transfers the encoded video over XLINK to the host, -saving it to disk as a video file. In the same time, a MobileNetv2SSD network is ran on the +saving it to disk as a video file. At the same time, a MobileNetv2SSD network is ran on the frames from right grayscale camera Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -26,7 +26,7 @@ Setup .. 
include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst b/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst index cca2e8a76..c7abff6f7 100644 --- a/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst +++ b/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst @@ -1,9 +1,9 @@ 12 - RGB Encoding & Mono with MobilenetSSD & Depth -================================================ +================================================== This example shows how to configure the depthai video encoder in h.265 format to encode the RGB camera input at Full-HD resolution at 30FPS, and transfers the encoded video over XLINK to the host, -saving it to disk as a video file. In the same time, a MobileNetv2SSD network is ran on the +saving it to disk as a video file. At the same time, a MobileNetv2SSD network is ran on the frames from right grayscale camera, while the application also displays the depth map produced by both of the grayscale cameras. Note that disparity is used in this case, as it colorizes in a more intuitive way. @@ -11,7 +11,7 @@ intuitive way. Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -28,7 +28,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/13_encoding_max_limit.rst b/docs/source/samples/13_encoding_max_limit.rst index 89c6933db..7b6604e92 100644 --- a/docs/source/samples/13_encoding_max_limit.rst +++ b/docs/source/samples/13_encoding_max_limit.rst @@ -1,5 +1,5 @@ 13 - Encoding Max Limit -===================== +======================= This example shows how to set up the encoder node to encode the RGB camera and both grayscale cameras (of DepthAI/OAK-D) at the same time, having all encoder parameters set to maximum quality and FPS. @@ -9,7 +9,7 @@ Each encoded video stream is transferred over XLINK and saved to a respective fi Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. 
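Note: the paragraph above (repeated for each encoding example) says the raw H.264/H.265 stream is later converted to an mp4 with ffmpeg. A minimal sketch of what that conversion step could look like on the host, assuming ffmpeg is installed and the recording was saved as video.h265 at 30 FPS (the file name and frame rate here are illustrative, not taken from the example):

    import subprocess

    # Remux the raw H.265 elementary stream into an mp4 container without
    # re-encoding; -framerate should match the FPS the encoder was configured with.
    subprocess.run(
        ["ffmpeg", "-framerate", "30", "-i", "video.h265", "-c", "copy", "video.mp4"],
        check=True,
    )
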
Demo diff --git a/docs/source/samples/15_rgb_mobilenet_4k.rst b/docs/source/samples/15_rgb_mobilenet_4k.rst index 7639a1a7a..77a1e770f 100644 --- a/docs/source/samples/15_rgb_mobilenet_4k.rst +++ b/docs/source/samples/15_rgb_mobilenet_4k.rst @@ -1,9 +1,9 @@ 15 - 4K RGB MobileNetSSD ======================== -This example shows how to MobileNetv2SSD on the RGB input frame, and how to display both the RGB +This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB preview and the metadata results from the MobileNetv2SSD on the preview. -The preview size is set to 4K resolution +The preview size is set to 4K resolution. Demo #### @@ -20,7 +20,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_5shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/17_video_mobilenet.rst b/docs/source/samples/17_video_mobilenet.rst index 4b51a2e4f..1d1f97b37 100644 --- a/docs/source/samples/17_video_mobilenet.rst +++ b/docs/source/samples/17_video_mobilenet.rst @@ -21,9 +21,9 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) and prerecorded video +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_8shave.blob` file) and prerecorded video (:code:`construction_vest.mp4` file) to work - you can download them -here: `mobilenet.blob `__ +here: `mobilenet-ssd_openvino_2021.2_8shave.blob `__ and `construction_vest.mp4 `__ Source code diff --git a/docs/source/samples/18_rgb_encoding_mobilenet.rst b/docs/source/samples/18_rgb_encoding_mobilenet.rst index 86e6e25d3..95ca4c0d3 100644 --- a/docs/source/samples/18_rgb_encoding_mobilenet.rst +++ b/docs/source/samples/18_rgb_encoding_mobilenet.rst @@ -9,7 +9,7 @@ frames from the same RGB camera that is used for encoding Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -26,7 +26,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/21_mobilenet_decoding_on_device.rst b/docs/source/samples/21_mobilenet_decoding_on_device.rst index 291dcb0f5..7c4f30520 100644 --- a/docs/source/samples/21_mobilenet_decoding_on_device.rst +++ b/docs/source/samples/21_mobilenet_decoding_on_device.rst @@ -17,7 +17,7 @@ Setup .. 
include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst index b8609f35e..2e4a99d67 100644 --- a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst +++ b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst @@ -21,7 +21,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`tiny-yolo-v3_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/24_opencv_support.rst b/docs/source/samples/24_opencv_support.rst index 427f7cc44..4316e0208 100644 --- a/docs/source/samples/24_opencv_support.rst +++ b/docs/source/samples/24_opencv_support.rst @@ -1,5 +1,5 @@ 24 - OpenCV support -========================= +=================== This example shows API which exposes both numpy and OpenCV compatible image types for eaiser usage. It uses ColorCamera node to retrieve both BGR interleaved 'preview' and NV12 encoded 'video' frames. diff --git a/docs/source/samples/25_system_information.rst b/docs/source/samples/25_system_information.rst index 9ea02ad69..fa89e19f2 100644 --- a/docs/source/samples/25_system_information.rst +++ b/docs/source/samples/25_system_information.rst @@ -1,5 +1,5 @@ 25 - System information -========================= +======================= This example shows how to get system information (memory usage, cpu usage and temperature) from the board. diff --git a/docs/source/samples/26_1_spatial_mobilenet.rst b/docs/source/samples/26_1_spatial_mobilenet.rst index 2b0d52654..26b49fb86 100644 --- a/docs/source/samples/26_1_spatial_mobilenet.rst +++ b/docs/source/samples/26_1_spatial_mobilenet.rst @@ -2,7 +2,7 @@ =========================================== This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB -preview, detections, depth map and spatial information (X,Y,Z). It's similar to example +preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '21_mobilenet_decoding_on_device' except it has spatial data. X,Y,Z coordinates are relative to the center of depth map. diff --git a/docs/source/samples/26_2_spatial_mobilenet_mono.rst b/docs/source/samples/26_2_spatial_mobilenet_mono.rst index 86c340aac..89a9eb08d 100644 --- a/docs/source/samples/26_2_spatial_mobilenet_mono.rst +++ b/docs/source/samples/26_2_spatial_mobilenet_mono.rst @@ -1,8 +1,8 @@ 26.2 - MONO & MobilenetSSD with spatial data ============================================ -This example shows how to run MobileNetv2SSD on the rectified right input frame, and -how to display both the preview, detections, depth map and spatial information (X,Y,Z). +This example shows how to run MobileNetv2SSD on the rectified right input frame, and +how to display both the preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '21_mobilenet_decoding_on_device' except it has spatial data. X,Y,Z coordinates are relative to the center of depth map. 
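Note: the 26.x docs above all state that the reported X,Y,Z coordinates are relative to the center of the depth map. As a rough sketch of how those values might be read on the host (the stream name "detections" and the surrounding pipeline are assumptions for illustration; spatialCoordinates is the field also used by example 27 later in this patch):

    import depthai as dai

    # Assumes 'pipeline' was built with a spatial detection network whose output
    # is sent to an XLinkOut stream named "detections" (illustrative name),
    # as in the 26.x examples this patch touches.
    with dai.Device(pipeline) as device:
        device.startPipeline()
        qDet = device.getOutputQueue(name="detections", maxSize=4, blocking=False)
        while True:
            inDet = qDet.tryGet()  # Non-blocking, returns None if nothing has arrived yet
            if inDet is not None:
                for det in inDet.detections:
                    # X,Y,Z are reported in millimetres, relative to the depth map center
                    print(f"X: {int(det.spatialCoordinates.x)} mm, "
                          f"Y: {int(det.spatialCoordinates.y)} mm, "
                          f"Z: {int(det.spatialCoordinates.z)} mm")
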
diff --git a/docs/source/samples/26_3_spatial_tiny_yolo.rst b/docs/source/samples/26_3_spatial_tiny_yolo.rst index 91c4ce930..29c13ba3b 100644 --- a/docs/source/samples/26_3_spatial_tiny_yolo.rst +++ b/docs/source/samples/26_3_spatial_tiny_yolo.rst @@ -1,8 +1,8 @@ 26.3 - RGB & TinyYolo with spatial data -=========================================== +======================================= This example shows how to run TinyYoloV3 and v4 on the RGB input frame, and how to display both the RGB -preview, detections, depth map and spatial information (X,Y,Z). It's similar to example +preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '26_1_spatial_mobilenet' except it is running TinyYolo network. X,Y,Z coordinates are relative to the center of depth map. diff --git a/docs/source/samples/27_spatial_location_calculator.rst b/docs/source/samples/27_spatial_location_calculator.rst index 5a41e034b..deefae8e0 100644 --- a/docs/source/samples/27_spatial_location_calculator.rst +++ b/docs/source/samples/27_spatial_location_calculator.rst @@ -17,7 +17,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/examples/01_rgb_preview.py b/examples/01_rgb_preview.py index ea7523ff2..eb4f70078 100755 --- a/examples/01_rgb_preview.py +++ b/examples/01_rgb_preview.py @@ -19,7 +19,7 @@ xoutRgb.setStreamName("rgb") camRgb.preview.link(xoutRgb.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/02_mono_preview.py b/examples/02_mono_preview.py index e5ae07e07..5d378c5b7 100755 --- a/examples/02_mono_preview.py +++ b/examples/02_mono_preview.py @@ -24,7 +24,7 @@ xoutRight.setStreamName('right') camRight.out.link(xoutRight.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -37,7 +37,7 @@ frameRight = None while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inLeft = qLeft.tryGet() inRight = qRight.tryGet() diff --git a/examples/03_depth_preview.py b/examples/03_depth_preview.py index ba0a55938..829b4a844 100755 --- a/examples/03_depth_preview.py +++ b/examples/03_depth_preview.py @@ -47,7 +47,7 @@ xout.setStreamName("disparity") depth.disparity.link(xout.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -59,32 +59,9 @@ inDepth = q.get() # blocking call, will wait until a new data has arrived frame = inDepth.getFrame() frame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX) - frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET) - - # Uncomment one of these and comment the one given above - # to see visualisation in different color frames - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_BONE) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_AUTUMN) - #frame = 
cv2.applyColorMap(frame, cv2.COLORMAP_WINTER) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_RAINBOW) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_OCEAN) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_SUMMER) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_SPRING) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_COOL) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_HSV) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PINK) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PARULA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_MAGMA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_INFERNO) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PLASMA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_VIRIDIS) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_CIVIDIS) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TWILIGHT) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TWILIGHT_SHIFTED) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TURBO) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_DEEPGREEN) + # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET) # frame is ready to be shown cv2.imshow("disparity", frame) diff --git a/examples/04_rgb_encoding.py b/examples/04_rgb_encoding.py index 28710e7c3..348b6bd46 100755 --- a/examples/04_rgb_encoding.py +++ b/examples/04_rgb_encoding.py @@ -20,7 +20,7 @@ videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -33,8 +33,8 @@ print("Press Ctrl+C to stop encoding...") try: while True: - h264Packet = q.get() # blocking call, will wait until a new data has arrived - h264Packet.getData().tofile(videoFile) # appends the packet data to the opened file + h264Packet = q.get() # Blocking call, will wait until a new data has arrived + h264Packet.getData().tofile(videoFile) # Appends the packet data to the opened file except KeyboardInterrupt: # Keyboard interrupt (Ctrl + C) detected pass diff --git a/examples/05_rgb_mono_encoding.py b/examples/05_rgb_mono_encoding.py index 11bc5c478..2c681ed8d 100755 --- a/examples/05_rgb_mono_encoding.py +++ b/examples/05_rgb_mono_encoding.py @@ -39,7 +39,7 @@ ve3.bitstream.link(ve3Out.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Start pipeline dev.startPipeline() diff --git a/examples/06_rgb_full_resolution_saver.py b/examples/06_rgb_full_resolution_saver.py index 9e526cef1..e0a3725cb 100755 --- a/examples/06_rgb_full_resolution_saver.py +++ b/examples/06_rgb_full_resolution_saver.py @@ -29,7 +29,7 @@ videoEnc.bitstream.link(xoutJpeg.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -42,7 +42,7 @@ Path('06_data').mkdir(parents=True, exist_ok=True) while True: - inRgb = qRgb.tryGet() # non-blocking call, will return a new data that has arrived or None otherwise + inRgb = qRgb.tryGet() # Non-blocking call, will return a new data that has arrived or None otherwise if inRgb is not None: cv2.imshow("rgb", inRgb.getCvFrame()) diff --git a/examples/07_mono_full_resolution_saver.py b/examples/07_mono_full_resolution_saver.py 
index 83749f01c..a8bb6c886 100755 --- a/examples/07_mono_full_resolution_saver.py +++ b/examples/07_mono_full_resolution_saver.py @@ -19,11 +19,11 @@ xoutRight.setStreamName("right") camRight.out.link(xoutRight.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Output queue will be used to get the grayscale frames from the output defined above qRight = device.getOutputQueue(name="right", maxSize=4, blocking=False) @@ -31,12 +31,12 @@ Path('07_data').mkdir(parents=True, exist_ok=True) while True: - inRight = qRight.get() # blocking call, will wait until a new data has arrived - # data is originally represented as a flat 1D array, it needs to be converted into HxW form + inRight = qRight.get() # Blocking call, will wait until a new data has arrived + # Data is originally represented as a flat 1D array, it needs to be converted into HxW form frameRight = inRight.getCvFrame() - # frame is transformed and ready to be shown + # Frame is transformed and ready to be shown cv2.imshow("right", frameRight) - # after showing the frame, it's being stored inside a target directory as a PNG image + # After showing the frame, it's being stored inside a target directory as a PNG image cv2.imwrite(f"07_data/{int(time.time() * 10000)}.png", frameRight) if cv2.waitKey(1) == ord('q'): diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index d17104260..f30a2a572 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -47,7 +47,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -78,11 +78,11 @@ def displayFrame(name, frame): while True: if args.sync: - # use blocking get() call to catch frame and inference result synced + # Use blocking get() call to catch frame and inference result synced inRgb = qRgb.get() inDet = qDet.get() else: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRgb = qRgb.tryGet() inDet = qDet.tryGet() @@ -95,7 +95,7 @@ def displayFrame(name, frame): detections = inDet.detections counter += 1 - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame if frame is not None: displayFrame("rgb", frame) diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index a98bec289..63602f284 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -48,7 +48,7 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -76,7 +76,7 @@ def displayFrame(name, frame): while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get 
(blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRight = qRight.tryGet() inDet = qDet.tryGet() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index eac21f9fb..c55ea3300 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -66,7 +66,7 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -89,7 +89,7 @@ def frameNorm(frame, bbox): return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int) while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRight = qRight.tryGet() inDet = qDet.tryGet() inDepth = qDepth.tryGet() @@ -101,35 +101,11 @@ def frameNorm(frame, bbox): detections = inDet.detections if inDepth is not None: + # Frame is transformed, the color map will be applied to highlight the depth info depthFrame = cv2.flip(inDepth.getFrame(), 1) - # frame is transformed, the color map will be applied to highlight the depth info + # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_JET) - # Uncomment one of these and comment the one given above - # to see visualisation in different color frames - - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_BONE) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_AUTUMN) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_WINTER) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_RAINBOW) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_OCEAN) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_SUMMER) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_SPRING) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_COOL) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_HSV) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_HOT) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PINK) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PARULA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_MAGMA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_INFERNO) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PLASMA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_VIRIDIS) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_CIVIDIS) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TWILIGHT) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TWILIGHT_SHIFTED) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TURBO) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_DEEPGREEN) - if rightFrame is not None: for detection in detections: bbox = frameNorm(rightFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax)) diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 98d75b31d..578bbb96b 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py 
+++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -60,7 +60,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index b1cff44f1..0de36b0d0 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -75,7 +75,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/13_encoding_max_limit.py b/examples/13_encoding_max_limit.py index ce076c70a..bd97d74b1 100755 --- a/examples/13_encoding_max_limit.py +++ b/examples/13_encoding_max_limit.py @@ -24,7 +24,7 @@ ve2Out.setStreamName('ve2Out') ve3Out.setStreamName('ve3Out') -#setting to 26fps will trigger error +# Setting to 26fps will trigger error ve1.setDefaultProfilePreset(1280, 720, 25, dai.VideoEncoderProperties.Profile.H264_MAIN) ve2.setDefaultProfilePreset(3840, 2160, 25, dai.VideoEncoderProperties.Profile.H265_MAIN) ve3.setDefaultProfilePreset(1280, 720, 25, dai.VideoEncoderProperties.Profile.H264_MAIN) @@ -39,7 +39,7 @@ ve3.bitstream.link(ve3Out.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Prepare data queues diff --git a/examples/14_color_camera_control.py b/examples/14_color_camera_control.py index e9fe2532c..3d0aebc9c 100755 --- a/examples/14_color_camera_control.py +++ b/examples/14_color_camera_control.py @@ -62,7 +62,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Get data queues diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 4459339db..007745530 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -46,11 +46,11 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Output queues will be used to get the frames and nn data from the outputs defined above qVideo = device.getOutputQueue(name="video", maxSize=4, blocking=False) qPreview = device.getOutputQueue(name="preview", maxSize=4, blocking=False) @@ -79,7 +79,7 @@ def displayFrame(name, frame): print("Resize video window with mouse drag!") while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inVideo = qVideo.tryGet() inPreview = qPreview.tryGet() inDet = qDet.tryGet() diff --git a/examples/16_device_queue_event.py 
b/examples/16_device_queue_event.py index 553f05116..419e6fc35 100755 --- a/examples/16_device_queue_event.py +++ b/examples/16_device_queue_event.py @@ -1,7 +1,9 @@ #!/usr/bin/env python3 -# This example demonstrates use of queue events to block a thread until a message -# arrives to any (of the specified) queue +""" + This example demonstrates use of queue events to block a thread until a message + arrives to any (of the specified) queue +""" import cv2 import depthai as dai @@ -29,11 +31,11 @@ camMono.out.link(xoutMono.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Clear queue events device.getQueueEvents() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index dca49608c..d42bca9e4 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -17,10 +17,9 @@ # Start defining a pipeline pipeline = dai.Pipeline() - -# Create neural network input -xinDet = pipeline.createXLinkIn() -xinDet.setStreamName("inDet") +# Create xLink input to which host will send frames from the video file +xinFrame = pipeline.createXLinkIn() +xinFrame.setStreamName("inFrame") # Define a neural network that will make predictions based on the source frames nn = pipeline.createMobileNetDetectionNetwork() @@ -28,7 +27,7 @@ nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) -xinDet.out.link(nn.input) +xinFrame.out.link(nn.input) # Create output nnOut = pipeline.createXLinkOut() @@ -44,9 +43,10 @@ with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - - # Output queues will be used to get the rgb frames and nn data from the outputs defined above - qIn = device.getInputQueue(name="inDet") + + # Output queue will be used to get nn data from the video frames. + # Input queue will be used to send video frames to the device. 
+ qIn = device.getInputQueue(name="inFrame") qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False) frame = None diff --git a/examples/19_mono_camera_control.py b/examples/19_mono_camera_control.py index bb4eb076b..d59cf3737 100755 --- a/examples/19_mono_camera_control.py +++ b/examples/19_mono_camera_control.py @@ -68,7 +68,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) frameRight = None while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inLeft = qLeft.tryGet() inRight = qRight.tryGet() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 8ce0fa11a..d565334f8 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -132,7 +132,7 @@ def displayFrame(name, frame): cv2.imshow(name, frame) while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRgb = qRgb.tryGet() inDet = qDet.tryGet() From c9f2275560291a365684c4af5ad49d5d410bddd0 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 21:08:29 +0200 Subject: [PATCH 2/3] fixed minor typos, comments, added context manager in 27, fixed blob name in 26.3 --- .../21_mobilenet_decoding_on_device.rst | 28 +--- docs/source/samples/23_autoexposure_roi.rst | 4 +- docs/source/samples/24_opencv_support.rst | 3 - docs/source/samples/25_system_information.rst | 2 - .../source/samples/26_1_spatial_mobilenet.rst | 2 +- .../samples/26_2_spatial_mobilenet_mono.rst | 2 +- .../27_spatial_location_calculator.rst | 6 +- examples/16_device_queue_event.py | 2 +- examples/17_video_mobilenet.py | 4 +- examples/19_mono_camera_control.py | 2 +- .../22_1_tiny_yolo_v3_device_side_decoding.py | 4 +- .../22_2_tiny_yolo_v4_device_side_decoding.py | 4 +- examples/23_autoexposure_roi.py | 4 +- examples/24_opencv_support.py | 2 +- examples/25_system_information.py | 4 +- examples/26_1_spatial_mobilenet.py | 8 +- examples/26_2_spatial_mobilenet_mono.py | 6 +- examples/26_3_spatial_tiny_yolo.py | 14 +- examples/27_spatial_location_calculator.py | 140 +++++++++--------- examples/28_camera_video_example.py | 2 +- 20 files changed, 104 insertions(+), 139 deletions(-) diff --git a/docs/source/samples/21_mobilenet_decoding_on_device.rst b/docs/source/samples/21_mobilenet_decoding_on_device.rst index 7c4f30520..c0f945164 100644 --- a/docs/source/samples/21_mobilenet_decoding_on_device.rst +++ b/docs/source/samples/21_mobilenet_decoding_on_device.rst @@ -1,32 +1,6 @@ 21 - RGB & MobilenetSSD decoding on device ========================================== -This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB -preview and the metadata results from the MobileNetv2SSD on the preview. It's similar to example '08_rgb_mobilenet' except -decoding is done on Myriad instead on the host. - - -setConfidenceThreshold - confidence threshold above which objects are detected - -Demo -#### - - -Setup -##### - -.. include:: /includes/install_from_pypi.rst - -This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from -`here `__ - -Source code -########### - -Also `available on GitHub `__ - -.. 
literalinclude:: ../../../examples/21_mobilenet_device_side_decoding.py - :language: python - :linenos: +This demo was migrated to :ref:`08 - RGB & MobilenetSSD` .. include:: /includes/footer-short.rst diff --git a/docs/source/samples/23_autoexposure_roi.rst b/docs/source/samples/23_autoexposure_roi.rst index 53bd71727..2b513067e 100644 --- a/docs/source/samples/23_autoexposure_roi.rst +++ b/docs/source/samples/23_autoexposure_roi.rst @@ -18,8 +18,8 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from -`here `__ +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_5shave.blob` file) to work - you can download it from +`here `__ Usage ##### diff --git a/docs/source/samples/24_opencv_support.rst b/docs/source/samples/24_opencv_support.rst index 4316e0208..bb69cf556 100644 --- a/docs/source/samples/24_opencv_support.rst +++ b/docs/source/samples/24_opencv_support.rst @@ -10,9 +10,6 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from -`here `__ - Source code ########### diff --git a/docs/source/samples/25_system_information.rst b/docs/source/samples/25_system_information.rst index fa89e19f2..65b21b81a 100644 --- a/docs/source/samples/25_system_information.rst +++ b/docs/source/samples/25_system_information.rst @@ -9,8 +9,6 @@ Setup .. include:: /includes/install_from_pypi.rst -For additional information, please follow :ref:`Python API installation guide ` - Source code ########### diff --git a/docs/source/samples/26_1_spatial_mobilenet.rst b/docs/source/samples/26_1_spatial_mobilenet.rst index 26b49fb86..5677538b3 100644 --- a/docs/source/samples/26_1_spatial_mobilenet.rst +++ b/docs/source/samples/26_1_spatial_mobilenet.rst @@ -18,7 +18,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/26_2_spatial_mobilenet_mono.rst b/docs/source/samples/26_2_spatial_mobilenet_mono.rst index 89a9eb08d..ac519d2e2 100644 --- a/docs/source/samples/26_2_spatial_mobilenet_mono.rst +++ b/docs/source/samples/26_2_spatial_mobilenet_mono.rst @@ -18,7 +18,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/27_spatial_location_calculator.rst b/docs/source/samples/27_spatial_location_calculator.rst index deefae8e0..92c607967 100644 --- a/docs/source/samples/27_spatial_location_calculator.rst +++ b/docs/source/samples/27_spatial_location_calculator.rst @@ -1,7 +1,7 @@ 27 - Spatial location calculator ================================ -This example shows how to retrieve spatial location data (X,Y,Z) on a runtime configurable ROI. +This example shows how to retrieve spatial location data (X,Y,Z) on a runtime configurable ROI. You can move the ROI using WASD keys. X,Y,Z coordinates are relative to the center of depth map. @@ -16,10 +16,6 @@ Setup .. 
include:: /includes/install_from_pypi.rst - -This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from -`here `__ - Source code ########### diff --git a/examples/16_device_queue_event.py b/examples/16_device_queue_event.py index 419e6fc35..132549341 100755 --- a/examples/16_device_queue_event.py +++ b/examples/16_device_queue_event.py @@ -48,7 +48,7 @@ # because queues can be set to non-blocking (overwriting) behavior message = device.getOutputQueue(queueName).get() - # display arrived frames + # Display arrived frames if type(message) == dai.ImgFrame: cv2.imshow(queueName, message.getCvFrame()) diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index d42bca9e4..a5691ec05 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -39,14 +39,14 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - # Output queue will be used to get nn data from the video frames. # Input queue will be used to send video frames to the device. qIn = device.getInputQueue(name="inFrame") + # Output queue will be used to get nn data from the video frames. qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False) frame = None diff --git a/examples/19_mono_camera_control.py b/examples/19_mono_camera_control.py index d59cf3737..54ef0ba1f 100755 --- a/examples/19_mono_camera_control.py +++ b/examples/19_mono_camera_control.py @@ -41,7 +41,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 36b005225..3dd8cbc50 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -14,7 +14,7 @@ import numpy as np import time -# tiny yolo v3 label texts +# Tiny yolo v3 label texts labelMap = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", @@ -75,7 +75,7 @@ detectionNetwork.out.link(nnOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 4f638ee55..d33d95286 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -46,7 +46,7 @@ camRgb.setInterleaved(False) camRgb.setFps(40) -# network specific settings +# Network specific settings detectionNetwork = pipeline.createYoloDetectionNetwork() detectionNetwork.setConfidenceThreshold(0.5) detectionNetwork.setNumClasses(80) @@ -74,7 +74,7 @@ detectionNetwork.out.link(nnOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git 
a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index d565334f8..4ef538503 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -100,7 +100,7 @@ def bboxToRoi(bbox): return roi -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -114,7 +114,7 @@ def bboxToRoi(bbox): nnRegion = True region = AutoExposureRegion() - + # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height def frameNorm(frame, bbox): normVals = np.full(len(bbox), frame.shape[0]) diff --git a/examples/24_opencv_support.py b/examples/24_opencv_support.py index 891bb09eb..81c9121f5 100755 --- a/examples/24_opencv_support.py +++ b/examples/24_opencv_support.py @@ -23,7 +23,7 @@ camRgb.preview.link(xoutPreview.input) camRgb.video.link(xoutVideo.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/25_system_information.py b/examples/25_system_information.py index cc118b3cb..2304a4cc8 100755 --- a/examples/25_system_information.py +++ b/examples/25_system_information.py @@ -27,7 +27,7 @@ def print_sys_info(info): linkOut.setStreamName("sysinfo") sys_logger.out.link(linkOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -36,7 +36,7 @@ def print_sys_info(info): q_sysinfo = device.getOutputQueue(name="sysinfo", maxSize=4, blocking=False) while True: - info = q_sysinfo.get() # blocking call, will wait until a new data has arrived + info = q_sysinfo.get() # Blocking call, will wait until a new data has arrived print_sys_info(info) if cv2.waitKey(1) == ord('q'): diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index e0f522b11..1facd9b66 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -54,7 +54,7 @@ monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -# setting node configs +# Setting node configs stereo.setOutputDepth(True) stereo.setConfidenceThreshold(255) @@ -82,7 +82,7 @@ stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -137,11 +137,11 @@ cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame height = frame.shape[0] width = frame.shape[1] for detection in detections: - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 7e17bc2a0..a6bc40f25 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -85,7 +85,7 @@ 
stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -142,7 +142,7 @@ if flipRectified: rectifiedRight = cv2.flip(rectifiedRight, 1) - # if the rectifiedRight is available, draw bounding boxes on it and show the rectifiedRight + # If the rectifiedRight is available, draw bounding boxes on it and show the rectifiedRight height = rectifiedRight.shape[0] width = rectifiedRight.shape[1] for detection in detections: @@ -150,7 +150,7 @@ swap = detection.xmin detection.xmin = 1 - detection.xmax detection.xmax = 1 - swap - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 3c1c18229..2da9e706c 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -13,7 +13,7 @@ Can be used for tiny-yolo-v3 or tiny-yolo-v4 networks ''' -# tiny yolo v3/4 label texts +# Tiny yolo v3/4 label texts labelMap = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", @@ -32,7 +32,7 @@ syncNN = True # Get argument first -nnBlobPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute()) +nnBlobPath = str((Path(__file__).parent / Path('models/tiny-yolo-v4_openvino_2021.2_6shave.blob')).resolve().absolute()) if len(sys.argv) > 1: nnBlobPath = sys.argv[1] @@ -77,7 +77,7 @@ spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5) spatialDetectionNetwork.setDepthLowerThreshold(100) spatialDetectionNetwork.setDepthUpperThreshold(5000) -# yolo specific parameters +# Yolo specific parameters spatialDetectionNetwork.setNumClasses(80) spatialDetectionNetwork.setCoordinateSize(4) spatialDetectionNetwork.setAnchors(np.array([10,14, 23,27, 37,58, 81,82, 135,169, 344,319])) @@ -101,7 +101,7 @@ stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -131,7 +131,7 @@ fps = counter / (current_time - startTime) counter = 0 startTime = current_time - + frame = inPreview.getCvFrame() depthFrame = depth.getFrame() @@ -156,11 +156,11 @@ cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame height = frame.shape[0] width = frame.shape[1] for detection in detections: - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/27_spatial_location_calculator.py b/examples/27_spatial_location_calculator.py index 4b46fb12e..0b696c287 100755 --- a/examples/27_spatial_location_calculator.py +++ b/examples/27_spatial_location_calculator.py @@ -59,73 +59,73 @@ spatialLocationCalculator.out.link(xoutSpatialData.input) 
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig) -# Pipeline defined, now the device is assigned and pipeline is started -device = dai.Device(pipeline) -device.startPipeline() - -# Output queue will be used to get the depth frames from the outputs defined above -depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False) -spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False) -spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig") - -color = (255, 255, 255) - -print("Use WASD keys to move ROI!") - -while True: - inDepth = depthQueue.get() # blocking call, will wait until a new data has arrived - inDepthAvg = spatialCalcQueue.get() # blocking call, will wait until a new data has arrived - - depthFrame = inDepth.getFrame() - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) - depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) - - spatialData = inDepthAvg.getSpatialLocations() - for depthData in spatialData: - roi = depthData.config.roi - roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0]) - xmin = int(roi.topLeft().x) - ymin = int(roi.topLeft().y) - xmax = int(roi.bottomRight().x) - ymax = int(roi.bottomRight().y) - - fontType = cv2.FONT_HERSHEY_TRIPLEX - cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color) - cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color) - cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color) - - - cv2.imshow("depth", depthFrameColor) - - newConfig = False - key = cv2.waitKey(1) - if key == ord('q'): - break - elif key == ord('w'): - if topLeft.y - stepSize >= 0: - topLeft.y -= stepSize - bottomRight.y -= stepSize - newConfig = True - elif key == ord('a'): - if topLeft.x - stepSize >= 0: - topLeft.x -= stepSize - bottomRight.x -= stepSize - newConfig = True - elif key == ord('s'): - if bottomRight.y + stepSize <= 1: - topLeft.y += stepSize - bottomRight.y += stepSize - newConfig = True - elif key == ord('d'): - if bottomRight.x + stepSize <= 1: - topLeft.x += stepSize - bottomRight.x += stepSize - newConfig = True - - if newConfig: - config.roi = dai.Rect(topLeft, bottomRight) - cfg = dai.SpatialLocationCalculatorConfig() - cfg.addROI(config) - spatialCalcConfigInQueue.send(cfg) \ No newline at end of file +# Pipeline is defined, now we can connect to the device +with dai.Device(pipeline) as device: + device.startPipeline() + + # Output queue will be used to get the depth frames from the outputs defined above + depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False) + spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False) + spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig") + + color = (255, 255, 255) + + print("Use WASD keys to move ROI!") + + while True: + inDepth = depthQueue.get() # Blocking call, will wait until a new data has arrived + inDepthAvg = spatialCalcQueue.get() # Blocking call, will wait until a new data has arrived + + depthFrame = inDepth.getFrame() + depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) + depthFrameColor = 
cv2.equalizeHist(depthFrameColor) + depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) + + spatialData = inDepthAvg.getSpatialLocations() + for depthData in spatialData: + roi = depthData.config.roi + roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0]) + xmin = int(roi.topLeft().x) + ymin = int(roi.topLeft().y) + xmax = int(roi.bottomRight().x) + ymax = int(roi.bottomRight().y) + + fontType = cv2.FONT_HERSHEY_TRIPLEX + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) + cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color) + + + cv2.imshow("depth", depthFrameColor) + + newConfig = False + key = cv2.waitKey(1) + if key == ord('q'): + break + elif key == ord('w'): + if topLeft.y - stepSize >= 0: + topLeft.y -= stepSize + bottomRight.y -= stepSize + newConfig = True + elif key == ord('a'): + if topLeft.x - stepSize >= 0: + topLeft.x -= stepSize + bottomRight.x -= stepSize + newConfig = True + elif key == ord('s'): + if bottomRight.y + stepSize <= 1: + topLeft.y += stepSize + bottomRight.y += stepSize + newConfig = True + elif key == ord('d'): + if bottomRight.x + stepSize <= 1: + topLeft.x += stepSize + bottomRight.x += stepSize + newConfig = True + + if newConfig: + config.roi = dai.Rect(topLeft, bottomRight) + cfg = dai.SpatialLocationCalculatorConfig() + cfg.addROI(config) + spatialCalcConfigInQueue.send(cfg) \ No newline at end of file diff --git a/examples/28_camera_video_example.py b/examples/28_camera_video_example.py index e7c949853..d3f9be095 100644 --- a/examples/28_camera_video_example.py +++ b/examples/28_camera_video_example.py @@ -21,7 +21,7 @@ colorCam.video.link(xoutVideo.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() From 7017e77b7b1ddc33cfb199d575d3cacf1bd26d40 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 21:09:30 +0200 Subject: [PATCH 3/3] removed 21 from the index (since we migrated the demo to 08), added examples/models into gitignore (since blobs are stored there) --- .gitignore | 3 +++ docs/source/index.rst | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index e3e51e27a..aac452dce 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ _builds/ #ci wheelhouse/ + +# Example blobs/files +examples/models/ \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index b073c9edd..77c7fbe89 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -90,7 +90,6 @@ Now, pick a tutorial or code sample and start utilizing Gen2 capabilities samples/16_device_queue_event.rst samples/17_video_mobilenet.rst samples/18_rgb_encoding_mobilenet.rst - samples/21_mobilenet_decoding_on_device.rst samples/22_1_tiny_yolo_v3_decoding_on_device.rst samples/22_2_tiny_yolo_v4_decoding_on_device.rst samples/23_autoexposure_roi.rst
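
Note: the comment rewritten throughout the first commit ("Pipeline is defined, now we can connect to the device") and the context-manager change to example 27 describe the same usage pattern: build the pipeline first, then open the device in a with block so it is closed automatically. A minimal, self-contained sketch of that pattern, modelled on example 01 (the camera setters are assumptions based on the Gen2 API calls used elsewhere in this patch):

    import cv2
    import depthai as dai

    # Define the pipeline: one color camera whose preview is streamed to the host
    pipeline = dai.Pipeline()
    camRgb = pipeline.createColorCamera()
    camRgb.setPreviewSize(300, 300)
    camRgb.setInterleaved(False)
    xoutRgb = pipeline.createXLinkOut()
    xoutRgb.setStreamName("rgb")
    camRgb.preview.link(xoutRgb.input)

    # Pipeline is defined, now we can connect to the device
    with dai.Device(pipeline) as device:
        device.startPipeline()
        qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
        while True:
            inRgb = qRgb.get()  # Blocking call, waits until a new frame arrives
            cv2.imshow("rgb", inRgb.getCvFrame())
            if cv2.waitKey(1) == ord('q'):
                break
    # Leaving the 'with' block closes the device, so no explicit cleanup is needed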