From 6fae24be6a12514b31fa5fa48725312c783f6563 Mon Sep 17 00:00:00 2001 From: Erik Date: Wed, 31 Mar 2021 17:50:40 +0200 Subject: [PATCH 01/36] Update docker-compose.yml specified version for docker-compose --- docs/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/docker-compose.yml b/docs/docker-compose.yml index e2f450637..dc27094a3 100644 --- a/docs/docker-compose.yml +++ b/docs/docker-compose.yml @@ -1,3 +1,4 @@ +version: "3" services: docs: build: From 23c449e2a9f7972f78927713a28f5b9cb74ca48b Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 15:52:13 +0200 Subject: [PATCH 02/36] fixed minor typos, comments, docs --- docs/source/samples/04_rgb_encoding.rst | 2 +- docs/source/samples/05_rgb_mono_encoding.rst | 2 +- .../samples/06_rgb_full_resolution_saver.rst | 2 +- docs/source/samples/09_mono_mobilenet.rst | 2 +- .../samples/10_mono_depth_mobilenetssd.rst | 2 +- .../11_rgb_encoding_mono_mobilenet.rst | 6 ++-- .../12_rgb_encoding_mono_mobilenet_depth.rst | 8 ++--- docs/source/samples/13_encoding_max_limit.rst | 4 +-- docs/source/samples/15_rgb_mobilenet_4k.rst | 6 ++-- docs/source/samples/17_video_mobilenet.rst | 4 +-- .../samples/18_rgb_encoding_mobilenet.rst | 4 +-- .../21_mobilenet_decoding_on_device.rst | 2 +- .../22_1_tiny_yolo_v3_decoding_on_device.rst | 2 +- docs/source/samples/24_opencv_support.rst | 2 +- docs/source/samples/25_system_information.rst | 2 +- .../source/samples/26_1_spatial_mobilenet.rst | 2 +- .../samples/26_2_spatial_mobilenet_mono.rst | 4 +-- .../source/samples/26_3_spatial_tiny_yolo.rst | 4 +-- .../27_spatial_location_calculator.rst | 2 +- examples/01_rgb_preview.py | 2 +- examples/02_mono_preview.py | 4 +-- examples/03_depth_preview.py | 29 ++--------------- examples/04_rgb_encoding.py | 6 ++-- examples/05_rgb_mono_encoding.py | 2 +- examples/06_rgb_full_resolution_saver.py | 4 +-- examples/07_mono_full_resolution_saver.py | 12 +++---- examples/08_rgb_mobilenet.py | 8 ++--- examples/09_mono_mobilenet.py | 4 +-- examples/10_mono_depth_mobilenetssd.py | 32 +++---------------- examples/11_rgb_encoding_mono_mobilenet.py | 2 +- .../12_rgb_encoding_mono_mobilenet_depth.py | 2 +- examples/13_encoding_max_limit.py | 4 +-- examples/14_color_camera_control.py | 2 +- examples/15_rgb_mobilenet_4k.py | 6 ++-- examples/16_device_queue_event.py | 10 +++--- examples/17_video_mobilenet.py | 16 +++++----- examples/19_mono_camera_control.py | 2 +- examples/23_autoexposure_roi.py | 2 +- 38 files changed, 83 insertions(+), 128 deletions(-) diff --git a/docs/source/samples/04_rgb_encoding.rst b/docs/source/samples/04_rgb_encoding.rst index ed61d54aa..8a4e0f451 100644 --- a/docs/source/samples/04_rgb_encoding.rst +++ b/docs/source/samples/04_rgb_encoding.rst @@ -9,7 +9,7 @@ encoded video over XLINK to the host, saving it to disk as a video file. Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. diff --git a/docs/source/samples/05_rgb_mono_encoding.rst b/docs/source/samples/05_rgb_mono_encoding.rst index 96b842947..2ca6a3664 100644 --- a/docs/source/samples/05_rgb_mono_encoding.rst +++ b/docs/source/samples/05_rgb_mono_encoding.rst @@ -8,7 +8,7 @@ each, all at 30FPS. 
Each encoded video stream is transferred over XLINK and save Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo diff --git a/docs/source/samples/06_rgb_full_resolution_saver.rst b/docs/source/samples/06_rgb_full_resolution_saver.rst index be2e54d36..76fc66a41 100644 --- a/docs/source/samples/06_rgb_full_resolution_saver.rst +++ b/docs/source/samples/06_rgb_full_resolution_saver.rst @@ -8,7 +8,7 @@ uncompressed frames, processed by ISP, and raw - BayerRG (R_Gr_Gb_B), as read fr 10-bit packed. See here for the pull request on this capability. Be careful, this example saves full resolution .png pictures to your host storage. So if you leave -them running, you could fill up your storage on your host. +it running, you could fill up your storage on your host. Demo #### diff --git a/docs/source/samples/09_mono_mobilenet.rst b/docs/source/samples/09_mono_mobilenet.rst index 9cb99f9c5..8e6d367d7 100644 --- a/docs/source/samples/09_mono_mobilenet.rst +++ b/docs/source/samples/09_mono_mobilenet.rst @@ -19,7 +19,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ diff --git a/docs/source/samples/10_mono_depth_mobilenetssd.rst b/docs/source/samples/10_mono_depth_mobilenetssd.rst index da36998b5..aac1c1bba 100644 --- a/docs/source/samples/10_mono_depth_mobilenetssd.rst +++ b/docs/source/samples/10_mono_depth_mobilenetssd.rst @@ -19,7 +19,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst b/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst index 925c15386..a870d4145 100644 --- a/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst +++ b/docs/source/samples/11_rgb_encoding_mono_mobilenet.rst @@ -3,13 +3,13 @@ This example shows how to configure the depthai video encoder in h.265 format to encode the RGB camera input at Full-HD resolution at 30FPS, and transfers the encoded video over XLINK to the host, -saving it to disk as a video file. In the same time, a MobileNetv2SSD network is ran on the +saving it to disk as a video file. At the same time, a MobileNetv2SSD network is ran on the frames from right grayscale camera Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -26,7 +26,7 @@ Setup .. 
include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst b/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst index cca2e8a76..c7abff6f7 100644 --- a/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst +++ b/docs/source/samples/12_rgb_encoding_mono_mobilenet_depth.rst @@ -1,9 +1,9 @@ 12 - RGB Encoding & Mono with MobilenetSSD & Depth -================================================ +================================================== This example shows how to configure the depthai video encoder in h.265 format to encode the RGB camera input at Full-HD resolution at 30FPS, and transfers the encoded video over XLINK to the host, -saving it to disk as a video file. In the same time, a MobileNetv2SSD network is ran on the +saving it to disk as a video file. At the same time, a MobileNetv2SSD network is ran on the frames from right grayscale camera, while the application also displays the depth map produced by both of the grayscale cameras. Note that disparity is used in this case, as it colorizes in a more intuitive way. @@ -11,7 +11,7 @@ intuitive way. Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -28,7 +28,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/13_encoding_max_limit.rst b/docs/source/samples/13_encoding_max_limit.rst index 89c6933db..7b6604e92 100644 --- a/docs/source/samples/13_encoding_max_limit.rst +++ b/docs/source/samples/13_encoding_max_limit.rst @@ -1,5 +1,5 @@ 13 - Encoding Max Limit -===================== +======================= This example shows how to set up the encoder node to encode the RGB camera and both grayscale cameras (of DepthAI/OAK-D) at the same time, having all encoder parameters set to maximum quality and FPS. @@ -9,7 +9,7 @@ Each encoded video stream is transferred over XLINK and saved to a respective fi Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. 
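The mp4 conversion step that these encoding examples keep referring to is a plain container rewrap on the host, with no re-encoding involved. A minimal sketch of that step, assuming the bitstream was saved as :code:`video.h265` at the 25 fps this example uses and that :code:`ffmpeg` is on the PATH (the file name and frame rate here are illustrative)::

    import subprocess

    # Wrap the raw H.265 elementary stream into an mp4 container without re-encoding
    subprocess.run(
        ["ffmpeg", "-framerate", "25", "-i", "video.h265", "-c", "copy", "video.mp4"],
        check=True,
    )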
Demo diff --git a/docs/source/samples/15_rgb_mobilenet_4k.rst b/docs/source/samples/15_rgb_mobilenet_4k.rst index 7639a1a7a..77a1e770f 100644 --- a/docs/source/samples/15_rgb_mobilenet_4k.rst +++ b/docs/source/samples/15_rgb_mobilenet_4k.rst @@ -1,9 +1,9 @@ 15 - 4K RGB MobileNetSSD ======================== -This example shows how to MobileNetv2SSD on the RGB input frame, and how to display both the RGB +This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB preview and the metadata results from the MobileNetv2SSD on the preview. -The preview size is set to 4K resolution +The preview size is set to 4K resolution. Demo #### @@ -20,7 +20,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_5shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/17_video_mobilenet.rst b/docs/source/samples/17_video_mobilenet.rst index 4b51a2e4f..1d1f97b37 100644 --- a/docs/source/samples/17_video_mobilenet.rst +++ b/docs/source/samples/17_video_mobilenet.rst @@ -21,9 +21,9 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) and prerecorded video +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_8shave.blob` file) and prerecorded video (:code:`construction_vest.mp4` file) to work - you can download them -here: `mobilenet.blob `__ +here: `mobilenet-ssd_openvino_2021.2_8shave.blob `__ and `construction_vest.mp4 `__ Source code diff --git a/docs/source/samples/18_rgb_encoding_mobilenet.rst b/docs/source/samples/18_rgb_encoding_mobilenet.rst index 86e6e25d3..95ca4c0d3 100644 --- a/docs/source/samples/18_rgb_encoding_mobilenet.rst +++ b/docs/source/samples/18_rgb_encoding_mobilenet.rst @@ -9,7 +9,7 @@ frames from the same RGB camera that is used for encoding Pressing Ctrl+C will stop the recording and then convert it using ffmpeg into an mp4 to make it playable. Note that ffmpeg will need to be installed and runnable for the conversion to mp4 to succeed. -Be careful, this example saves encoded video to your host storage. So if you leave them running, +Be careful, this example saves encoded video to your host storage. So if you leave it running, you could fill up your storage on your host. Demo @@ -26,7 +26,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/21_mobilenet_decoding_on_device.rst b/docs/source/samples/21_mobilenet_decoding_on_device.rst index 291dcb0f5..7c4f30520 100644 --- a/docs/source/samples/21_mobilenet_decoding_on_device.rst +++ b/docs/source/samples/21_mobilenet_decoding_on_device.rst @@ -17,7 +17,7 @@ Setup .. 
include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst index b8609f35e..2e4a99d67 100644 --- a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst +++ b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst @@ -21,7 +21,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`tiny-yolo-v3_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/24_opencv_support.rst b/docs/source/samples/24_opencv_support.rst index 427f7cc44..4316e0208 100644 --- a/docs/source/samples/24_opencv_support.rst +++ b/docs/source/samples/24_opencv_support.rst @@ -1,5 +1,5 @@ 24 - OpenCV support -========================= +=================== This example shows API which exposes both numpy and OpenCV compatible image types for eaiser usage. It uses ColorCamera node to retrieve both BGR interleaved 'preview' and NV12 encoded 'video' frames. diff --git a/docs/source/samples/25_system_information.rst b/docs/source/samples/25_system_information.rst index 9ea02ad69..fa89e19f2 100644 --- a/docs/source/samples/25_system_information.rst +++ b/docs/source/samples/25_system_information.rst @@ -1,5 +1,5 @@ 25 - System information -========================= +======================= This example shows how to get system information (memory usage, cpu usage and temperature) from the board. diff --git a/docs/source/samples/26_1_spatial_mobilenet.rst b/docs/source/samples/26_1_spatial_mobilenet.rst index 2b0d52654..26b49fb86 100644 --- a/docs/source/samples/26_1_spatial_mobilenet.rst +++ b/docs/source/samples/26_1_spatial_mobilenet.rst @@ -2,7 +2,7 @@ =========================================== This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB -preview, detections, depth map and spatial information (X,Y,Z). It's similar to example +preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '21_mobilenet_decoding_on_device' except it has spatial data. X,Y,Z coordinates are relative to the center of depth map. diff --git a/docs/source/samples/26_2_spatial_mobilenet_mono.rst b/docs/source/samples/26_2_spatial_mobilenet_mono.rst index 86c340aac..89a9eb08d 100644 --- a/docs/source/samples/26_2_spatial_mobilenet_mono.rst +++ b/docs/source/samples/26_2_spatial_mobilenet_mono.rst @@ -1,8 +1,8 @@ 26.2 - MONO & MobilenetSSD with spatial data ============================================ -This example shows how to run MobileNetv2SSD on the rectified right input frame, and -how to display both the preview, detections, depth map and spatial information (X,Y,Z). +This example shows how to run MobileNetv2SSD on the rectified right input frame, and +how to display both the preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '21_mobilenet_decoding_on_device' except it has spatial data. X,Y,Z coordinates are relative to the center of depth map. 
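One detail shared by all of these spatial samples: the "denormalize bounding box" step is a few lines of arithmetic, since detections arrive in the <0..1> range and must be scaled by the frame size before drawing. A self-contained sketch of just that step (the frame size and detection values below are made-up placeholders)::

    # Detections are normalized to <0..1>; scale by the frame size to get pixels
    width, height = 416, 416                         # illustrative preview size
    xmin, ymin, xmax, ymax = 0.30, 0.25, 0.55, 0.80  # hypothetical detection
    x1, y1 = int(xmin * width), int(ymin * height)
    x2, y2 = int(xmax * width), int(ymax * height)
    print((x1, y1), (x2, y2))                        # (124, 104) (228, 332)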
diff --git a/docs/source/samples/26_3_spatial_tiny_yolo.rst b/docs/source/samples/26_3_spatial_tiny_yolo.rst index 91c4ce930..29c13ba3b 100644 --- a/docs/source/samples/26_3_spatial_tiny_yolo.rst +++ b/docs/source/samples/26_3_spatial_tiny_yolo.rst @@ -1,8 +1,8 @@ 26.3 - RGB & TinyYolo with spatial data -=========================================== +======================================= This example shows how to run TinyYoloV3 and v4 on the RGB input frame, and how to display both the RGB -preview, detections, depth map and spatial information (X,Y,Z). It's similar to example +preview, detections, depth map and spatial information (X,Y,Z). It's similar to example '26_1_spatial_mobilenet' except it is running TinyYolo network. X,Y,Z coordinates are relative to the center of depth map. diff --git a/docs/source/samples/27_spatial_location_calculator.rst b/docs/source/samples/27_spatial_location_calculator.rst index 5a41e034b..deefae8e0 100644 --- a/docs/source/samples/27_spatial_location_calculator.rst +++ b/docs/source/samples/27_spatial_location_calculator.rst @@ -17,7 +17,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/examples/01_rgb_preview.py b/examples/01_rgb_preview.py index ea7523ff2..eb4f70078 100755 --- a/examples/01_rgb_preview.py +++ b/examples/01_rgb_preview.py @@ -19,7 +19,7 @@ xoutRgb.setStreamName("rgb") camRgb.preview.link(xoutRgb.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/02_mono_preview.py b/examples/02_mono_preview.py index e5ae07e07..5d378c5b7 100755 --- a/examples/02_mono_preview.py +++ b/examples/02_mono_preview.py @@ -24,7 +24,7 @@ xoutRight.setStreamName('right') camRight.out.link(xoutRight.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -37,7 +37,7 @@ frameRight = None while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inLeft = qLeft.tryGet() inRight = qRight.tryGet() diff --git a/examples/03_depth_preview.py b/examples/03_depth_preview.py index ba0a55938..829b4a844 100755 --- a/examples/03_depth_preview.py +++ b/examples/03_depth_preview.py @@ -47,7 +47,7 @@ xout.setStreamName("disparity") depth.disparity.link(xout.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -59,32 +59,9 @@ inDepth = q.get() # blocking call, will wait until a new data has arrived frame = inDepth.getFrame() frame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX) - frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET) - - # Uncomment one of these and comment the one given above - # to see visualisation in different color frames - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_BONE) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_AUTUMN) - #frame = 
cv2.applyColorMap(frame, cv2.COLORMAP_WINTER) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_RAINBOW) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_OCEAN) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_SUMMER) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_SPRING) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_COOL) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_HSV) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_HOT) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PINK) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PARULA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_MAGMA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_INFERNO) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_PLASMA) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_VIRIDIS) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_CIVIDIS) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TWILIGHT) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TWILIGHT_SHIFTED) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_TURBO) - #frame = cv2.applyColorMap(frame, cv2.COLORMAP_DEEPGREEN) + # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + frame = cv2.applyColorMap(frame, cv2.COLORMAP_JET) # frame is ready to be shown cv2.imshow("disparity", frame) diff --git a/examples/04_rgb_encoding.py b/examples/04_rgb_encoding.py index 28710e7c3..348b6bd46 100755 --- a/examples/04_rgb_encoding.py +++ b/examples/04_rgb_encoding.py @@ -20,7 +20,7 @@ videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -33,8 +33,8 @@ print("Press Ctrl+C to stop encoding...") try: while True: - h264Packet = q.get() # blocking call, will wait until a new data has arrived - h264Packet.getData().tofile(videoFile) # appends the packet data to the opened file + h264Packet = q.get() # Blocking call, will wait until a new data has arrived + h264Packet.getData().tofile(videoFile) # Appends the packet data to the opened file except KeyboardInterrupt: # Keyboard interrupt (Ctrl + C) detected pass diff --git a/examples/05_rgb_mono_encoding.py b/examples/05_rgb_mono_encoding.py index 11bc5c478..2c681ed8d 100755 --- a/examples/05_rgb_mono_encoding.py +++ b/examples/05_rgb_mono_encoding.py @@ -39,7 +39,7 @@ ve3.bitstream.link(ve3Out.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Start pipeline dev.startPipeline() diff --git a/examples/06_rgb_full_resolution_saver.py b/examples/06_rgb_full_resolution_saver.py index 9e526cef1..e0a3725cb 100755 --- a/examples/06_rgb_full_resolution_saver.py +++ b/examples/06_rgb_full_resolution_saver.py @@ -29,7 +29,7 @@ videoEnc.bitstream.link(xoutJpeg.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -42,7 +42,7 @@ Path('06_data').mkdir(parents=True, exist_ok=True) while True: - inRgb = qRgb.tryGet() # non-blocking call, will return a new data that has arrived or None otherwise + inRgb = qRgb.tryGet() # Non-blocking call, will return a new data that has arrived or None otherwise if inRgb is not None: cv2.imshow("rgb", inRgb.getCvFrame()) diff --git a/examples/07_mono_full_resolution_saver.py b/examples/07_mono_full_resolution_saver.py 
index 83749f01c..a8bb6c886 100755 --- a/examples/07_mono_full_resolution_saver.py +++ b/examples/07_mono_full_resolution_saver.py @@ -19,11 +19,11 @@ xoutRight.setStreamName("right") camRight.out.link(xoutRight.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Output queue will be used to get the grayscale frames from the output defined above qRight = device.getOutputQueue(name="right", maxSize=4, blocking=False) @@ -31,12 +31,12 @@ Path('07_data').mkdir(parents=True, exist_ok=True) while True: - inRight = qRight.get() # blocking call, will wait until a new data has arrived - # data is originally represented as a flat 1D array, it needs to be converted into HxW form + inRight = qRight.get() # Blocking call, will wait until a new data has arrived + # Data is originally represented as a flat 1D array, it needs to be converted into HxW form frameRight = inRight.getCvFrame() - # frame is transformed and ready to be shown + # Frame is transformed and ready to be shown cv2.imshow("right", frameRight) - # after showing the frame, it's being stored inside a target directory as a PNG image + # After showing the frame, it's being stored inside a target directory as a PNG image cv2.imwrite(f"07_data/{int(time.time() * 10000)}.png", frameRight) if cv2.waitKey(1) == ord('q'): diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index d17104260..f30a2a572 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -47,7 +47,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -78,11 +78,11 @@ def displayFrame(name, frame): while True: if args.sync: - # use blocking get() call to catch frame and inference result synced + # Use blocking get() call to catch frame and inference result synced inRgb = qRgb.get() inDet = qDet.get() else: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRgb = qRgb.tryGet() inDet = qDet.tryGet() @@ -95,7 +95,7 @@ def displayFrame(name, frame): detections = inDet.detections counter += 1 - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame if frame is not None: displayFrame("rgb", frame) diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index a98bec289..63602f284 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -48,7 +48,7 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -76,7 +76,7 @@ def displayFrame(name, frame): while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get 
(blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRight = qRight.tryGet() inDet = qDet.tryGet() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index eac21f9fb..c55ea3300 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -66,7 +66,7 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -89,7 +89,7 @@ def frameNorm(frame, bbox): return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int) while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRight = qRight.tryGet() inDet = qDet.tryGet() inDepth = qDepth.tryGet() @@ -101,35 +101,11 @@ def frameNorm(frame, bbox): detections = inDet.detections if inDepth is not None: + # Frame is transformed, the color map will be applied to highlight the depth info depthFrame = cv2.flip(inDepth.getFrame(), 1) - # frame is transformed, the color map will be applied to highlight the depth info + # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_JET) - # Uncomment one of these and comment the one given above - # to see visualisation in different color frames - - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_BONE) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_AUTUMN) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_WINTER) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_RAINBOW) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_OCEAN) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_SUMMER) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_SPRING) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_COOL) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_HSV) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_HOT) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PINK) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PARULA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_MAGMA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_INFERNO) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_PLASMA) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_VIRIDIS) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_CIVIDIS) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TWILIGHT) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TWILIGHT_SHIFTED) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_TURBO) - # depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_DEEPGREEN) - if rightFrame is not None: for detection in detections: bbox = frameNorm(rightFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax)) diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 98d75b31d..578bbb96b 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py 
+++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -60,7 +60,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index b1cff44f1..0de36b0d0 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -75,7 +75,7 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/13_encoding_max_limit.py b/examples/13_encoding_max_limit.py index ce076c70a..bd97d74b1 100755 --- a/examples/13_encoding_max_limit.py +++ b/examples/13_encoding_max_limit.py @@ -24,7 +24,7 @@ ve2Out.setStreamName('ve2Out') ve3Out.setStreamName('ve3Out') -#setting to 26fps will trigger error +# Setting to 26fps will trigger error ve1.setDefaultProfilePreset(1280, 720, 25, dai.VideoEncoderProperties.Profile.H264_MAIN) ve2.setDefaultProfilePreset(3840, 2160, 25, dai.VideoEncoderProperties.Profile.H265_MAIN) ve3.setDefaultProfilePreset(1280, 720, 25, dai.VideoEncoderProperties.Profile.H264_MAIN) @@ -39,7 +39,7 @@ ve3.bitstream.link(ve3Out.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Prepare data queues diff --git a/examples/14_color_camera_control.py b/examples/14_color_camera_control.py index e9fe2532c..3d0aebc9c 100755 --- a/examples/14_color_camera_control.py +++ b/examples/14_color_camera_control.py @@ -62,7 +62,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as dev: # Get data queues diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 4459339db..007745530 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -46,11 +46,11 @@ labelMap = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Output queues will be used to get the frames and nn data from the outputs defined above qVideo = device.getOutputQueue(name="video", maxSize=4, blocking=False) qPreview = device.getOutputQueue(name="preview", maxSize=4, blocking=False) @@ -79,7 +79,7 @@ def displayFrame(name, frame): print("Resize video window with mouse drag!") while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inVideo = qVideo.tryGet() inPreview = qPreview.tryGet() inDet = qDet.tryGet() diff --git a/examples/16_device_queue_event.py 
b/examples/16_device_queue_event.py index 553f05116..419e6fc35 100755 --- a/examples/16_device_queue_event.py +++ b/examples/16_device_queue_event.py @@ -1,7 +1,9 @@ #!/usr/bin/env python3 -# This example demonstrates use of queue events to block a thread until a message -# arrives to any (of the specified) queue +""" + This example demonstrates use of queue events to block a thread until a message + arrives to any (of the specified) queue +""" import cv2 import depthai as dai @@ -29,11 +31,11 @@ camMono.out.link(xoutMono.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - + # Clear queue events device.getQueueEvents() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index dca49608c..d42bca9e4 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -17,10 +17,9 @@ # Start defining a pipeline pipeline = dai.Pipeline() - -# Create neural network input -xinDet = pipeline.createXLinkIn() -xinDet.setStreamName("inDet") +# Create xLink input to which host will send frames from the video file +xinFrame = pipeline.createXLinkIn() +xinFrame.setStreamName("inFrame") # Define a neural network that will make predictions based on the source frames nn = pipeline.createMobileNetDetectionNetwork() @@ -28,7 +27,7 @@ nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) -xinDet.out.link(nn.input) +xinFrame.out.link(nn.input) # Create output nnOut = pipeline.createXLinkOut() @@ -44,9 +43,10 @@ with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - - # Output queues will be used to get the rgb frames and nn data from the outputs defined above - qIn = device.getInputQueue(name="inDet") + + # Output queue will be used to get nn data from the video frames. + # Input queue will be used to send video frames to the device. 
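    # (On the host side, each frame read from the video file is wrapped into a
    # dai.ImgFrame message and pushed to the device with qIn.send(), which is
    # what lets the network run on prerecorded footage)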
+ qIn = device.getInputQueue(name="inFrame") qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False) frame = None diff --git a/examples/19_mono_camera_control.py b/examples/19_mono_camera_control.py index bb4eb076b..d59cf3737 100755 --- a/examples/19_mono_camera_control.py +++ b/examples/19_mono_camera_control.py @@ -68,7 +68,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) frameRight = None while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inLeft = qLeft.tryGet() inRight = qRight.tryGet() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 8ce0fa11a..d565334f8 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -132,7 +132,7 @@ def displayFrame(name, frame): cv2.imshow(name, frame) while True: - # instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise + # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise inRgb = qRgb.tryGet() inDet = qDet.tryGet() From a77e43bcd82eccbc5683bf0f52ebc837ee0478ae Mon Sep 17 00:00:00 2001 From: Erik Date: Thu, 1 Apr 2021 18:02:28 +0200 Subject: [PATCH 03/36] Update multiple.rst added url to the depthai-experiments where we have demo code for running multiple devices on a host --- docs/source/tutorials/multiple.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/source/tutorials/multiple.rst b/docs/source/tutorials/multiple.rst index c5e6de0a8..41a056338 100644 --- a/docs/source/tutorials/multiple.rst +++ b/docs/source/tutorials/multiple.rst @@ -11,6 +11,11 @@ a `Raspberry Pi Compute Module Edition (BW1097) `__. The demo will find all devices connected to the host and display an RGB preview from each of them. 
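In outline, the linked demo enumerates the attached devices first and then opens each one with its own pipeline. A minimal sketch of the enumeration part, assuming only that :code:`depthai` is installed (:code:`getAllAvailableDevices()` and :code:`getMxId()` are the relevant API calls; the per-device pipeline handling is only indicated in the comments)::

    import depthai as dai

    # List every DepthAI device the host can currently see
    for deviceInfo in dai.Device.getAllAvailableDevices():
        print("Found device with MxId:", deviceInfo.getMxId())
        # Each device is then opened with its own pipeline, roughly:
        # with dai.Device(pipeline, deviceInfo) as device: ...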
+ Dependencies ############ From c9f2275560291a365684c4af5ad49d5d410bddd0 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 21:08:29 +0200 Subject: [PATCH 04/36] fixed minor typos, comments, added context manager in 27, fixed blob name in 26.3 --- .../21_mobilenet_decoding_on_device.rst | 28 +--- docs/source/samples/23_autoexposure_roi.rst | 4 +- docs/source/samples/24_opencv_support.rst | 3 - docs/source/samples/25_system_information.rst | 2 - .../source/samples/26_1_spatial_mobilenet.rst | 2 +- .../samples/26_2_spatial_mobilenet_mono.rst | 2 +- .../27_spatial_location_calculator.rst | 6 +- examples/16_device_queue_event.py | 2 +- examples/17_video_mobilenet.py | 4 +- examples/19_mono_camera_control.py | 2 +- .../22_1_tiny_yolo_v3_device_side_decoding.py | 4 +- .../22_2_tiny_yolo_v4_device_side_decoding.py | 4 +- examples/23_autoexposure_roi.py | 4 +- examples/24_opencv_support.py | 2 +- examples/25_system_information.py | 4 +- examples/26_1_spatial_mobilenet.py | 8 +- examples/26_2_spatial_mobilenet_mono.py | 6 +- examples/26_3_spatial_tiny_yolo.py | 14 +- examples/27_spatial_location_calculator.py | 140 +++++++++--------- examples/28_camera_video_example.py | 2 +- 20 files changed, 104 insertions(+), 139 deletions(-) diff --git a/docs/source/samples/21_mobilenet_decoding_on_device.rst b/docs/source/samples/21_mobilenet_decoding_on_device.rst index 7c4f30520..c0f945164 100644 --- a/docs/source/samples/21_mobilenet_decoding_on_device.rst +++ b/docs/source/samples/21_mobilenet_decoding_on_device.rst @@ -1,32 +1,6 @@ 21 - RGB & MobilenetSSD decoding on device ========================================== -This example shows how to run MobileNetv2SSD on the RGB input frame, and how to display both the RGB -preview and the metadata results from the MobileNetv2SSD on the preview. It's similar to example '08_rgb_mobilenet' except -decoding is done on Myriad instead on the host. - - -setConfidenceThreshold - confidence threshold above which objects are detected - -Demo -#### - - -Setup -##### - -.. include:: /includes/install_from_pypi.rst - -This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from -`here `__ - -Source code -########### - -Also `available on GitHub `__ - -.. literalinclude:: ../../../examples/21_mobilenet_device_side_decoding.py - :language: python - :linenos: +This demo was migrated to :ref:`08 - RGB & MobilenetSSD` .. include:: /includes/footer-short.rst diff --git a/docs/source/samples/23_autoexposure_roi.rst b/docs/source/samples/23_autoexposure_roi.rst index 53bd71727..2b513067e 100644 --- a/docs/source/samples/23_autoexposure_roi.rst +++ b/docs/source/samples/23_autoexposure_roi.rst @@ -18,8 +18,8 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from -`here `__ +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_5shave.blob` file) to work - you can download it from +`here `__ Usage ##### diff --git a/docs/source/samples/24_opencv_support.rst b/docs/source/samples/24_opencv_support.rst index 4316e0208..bb69cf556 100644 --- a/docs/source/samples/24_opencv_support.rst +++ b/docs/source/samples/24_opencv_support.rst @@ -10,9 +10,6 @@ Setup .. 
include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from -`here `__ - Source code ########### diff --git a/docs/source/samples/25_system_information.rst b/docs/source/samples/25_system_information.rst index fa89e19f2..65b21b81a 100644 --- a/docs/source/samples/25_system_information.rst +++ b/docs/source/samples/25_system_information.rst @@ -9,8 +9,6 @@ Setup .. include:: /includes/install_from_pypi.rst -For additional information, please follow :ref:`Python API installation guide ` - Source code ########### diff --git a/docs/source/samples/26_1_spatial_mobilenet.rst b/docs/source/samples/26_1_spatial_mobilenet.rst index 26b49fb86..5677538b3 100644 --- a/docs/source/samples/26_1_spatial_mobilenet.rst +++ b/docs/source/samples/26_1_spatial_mobilenet.rst @@ -18,7 +18,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/26_2_spatial_mobilenet_mono.rst b/docs/source/samples/26_2_spatial_mobilenet_mono.rst index 89a9eb08d..ac519d2e2 100644 --- a/docs/source/samples/26_2_spatial_mobilenet_mono.rst +++ b/docs/source/samples/26_2_spatial_mobilenet_mono.rst @@ -18,7 +18,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`mobilenet.blob` file) to work - you can download it from +This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code diff --git a/docs/source/samples/27_spatial_location_calculator.rst b/docs/source/samples/27_spatial_location_calculator.rst index deefae8e0..92c607967 100644 --- a/docs/source/samples/27_spatial_location_calculator.rst +++ b/docs/source/samples/27_spatial_location_calculator.rst @@ -1,7 +1,7 @@ 27 - Spatial location calculator ================================ -This example shows how to retrieve spatial location data (X,Y,Z) on a runtime configurable ROI. +This example shows how to retrieve spatial location data (X,Y,Z) on a runtime configurable ROI. You can move the ROI using WASD keys. X,Y,Z coordinates are relative to the center of depth map. @@ -16,10 +16,6 @@ Setup .. 
include:: /includes/install_from_pypi.rst - -This example also requires MobilenetSDD blob (:code:`mobilenet-ssd_openvino_2021.2_6shave.blob` file) to work - you can download it from -`here `__ - Source code ########### diff --git a/examples/16_device_queue_event.py b/examples/16_device_queue_event.py index 419e6fc35..132549341 100755 --- a/examples/16_device_queue_event.py +++ b/examples/16_device_queue_event.py @@ -48,7 +48,7 @@ # because queues can be set to non-blocking (overwriting) behavior message = device.getOutputQueue(queueName).get() - # display arrived frames + # Display arrived frames if type(message) == dai.ImgFrame: cv2.imshow(queueName, message.getCvFrame()) diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index d42bca9e4..a5691ec05 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -39,14 +39,14 @@ "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() - # Output queue will be used to get nn data from the video frames. # Input queue will be used to send video frames to the device. qIn = device.getInputQueue(name="inFrame") + # Output queue will be used to get nn data from the video frames. qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False) frame = None diff --git a/examples/19_mono_camera_control.py b/examples/19_mono_camera_control.py index d59cf3737..54ef0ba1f 100755 --- a/examples/19_mono_camera_control.py +++ b/examples/19_mono_camera_control.py @@ -41,7 +41,7 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 36b005225..3dd8cbc50 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -14,7 +14,7 @@ import numpy as np import time -# tiny yolo v3 label texts +# Tiny yolo v3 label texts labelMap = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", @@ -75,7 +75,7 @@ detectionNetwork.out.link(nnOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 4f638ee55..d33d95286 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -46,7 +46,7 @@ camRgb.setInterleaved(False) camRgb.setFps(40) -# network specific settings +# Network specific settings detectionNetwork = pipeline.createYoloDetectionNetwork() detectionNetwork.setConfidenceThreshold(0.5) detectionNetwork.setNumClasses(80) @@ -74,7 +74,7 @@ detectionNetwork.out.link(nnOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git 
a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index d565334f8..4ef538503 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -100,7 +100,7 @@ def bboxToRoi(bbox): return roi -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -114,7 +114,7 @@ def bboxToRoi(bbox): nnRegion = True region = AutoExposureRegion() - + # nn data (bounding box locations) are in <0..1> range - they need to be normalized with frame width/height def frameNorm(frame, bbox): normVals = np.full(len(bbox), frame.shape[0]) diff --git a/examples/24_opencv_support.py b/examples/24_opencv_support.py index 891bb09eb..81c9121f5 100755 --- a/examples/24_opencv_support.py +++ b/examples/24_opencv_support.py @@ -23,7 +23,7 @@ camRgb.preview.link(xoutPreview.input) camRgb.video.link(xoutVideo.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() diff --git a/examples/25_system_information.py b/examples/25_system_information.py index cc118b3cb..2304a4cc8 100755 --- a/examples/25_system_information.py +++ b/examples/25_system_information.py @@ -27,7 +27,7 @@ def print_sys_info(info): linkOut.setStreamName("sysinfo") sys_logger.out.link(linkOut.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -36,7 +36,7 @@ def print_sys_info(info): q_sysinfo = device.getOutputQueue(name="sysinfo", maxSize=4, blocking=False) while True: - info = q_sysinfo.get() # blocking call, will wait until a new data has arrived + info = q_sysinfo.get() # Blocking call, will wait until a new data has arrived print_sys_info(info) if cv2.waitKey(1) == ord('q'): diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index e0f522b11..1facd9b66 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -54,7 +54,7 @@ monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -# setting node configs +# Setting node configs stereo.setOutputDepth(True) stereo.setConfidenceThreshold(255) @@ -82,7 +82,7 @@ stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -137,11 +137,11 @@ cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame height = frame.shape[0] width = frame.shape[1] for detection in detections: - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 7e17bc2a0..a6bc40f25 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -85,7 +85,7 @@ 
stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -142,7 +142,7 @@ if flipRectified: rectifiedRight = cv2.flip(rectifiedRight, 1) - # if the rectifiedRight is available, draw bounding boxes on it and show the rectifiedRight + # If the rectifiedRight is available, draw bounding boxes on it and show the rectifiedRight height = rectifiedRight.shape[0] width = rectifiedRight.shape[1] for detection in detections: @@ -150,7 +150,7 @@ swap = detection.xmin detection.xmin = 1 - detection.xmax detection.xmax = 1 - swap - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 3c1c18229..2da9e706c 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -13,7 +13,7 @@ Can be used for tiny-yolo-v3 or tiny-yolo-v4 networks ''' -# tiny yolo v3/4 label texts +# Tiny yolo v3/4 label texts labelMap = [ "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", @@ -32,7 +32,7 @@ syncNN = True # Get argument first -nnBlobPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute()) +nnBlobPath = str((Path(__file__).parent / Path('models/tiny-yolo-v4_openvino_2021.2_6shave.blob')).resolve().absolute()) if len(sys.argv) > 1: nnBlobPath = sys.argv[1] @@ -77,7 +77,7 @@ spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5) spatialDetectionNetwork.setDepthLowerThreshold(100) spatialDetectionNetwork.setDepthUpperThreshold(5000) -# yolo specific parameters +# Yolo specific parameters spatialDetectionNetwork.setNumClasses(80) spatialDetectionNetwork.setCoordinateSize(4) spatialDetectionNetwork.setAnchors(np.array([10,14, 23,27, 37,58, 81,82, 135,169, 344,319])) @@ -101,7 +101,7 @@ stereo.depth.link(spatialDetectionNetwork.inputDepth) spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() @@ -131,7 +131,7 @@ fps = counter / (current_time - startTime) counter = 0 startTime = current_time - + frame = inPreview.getCvFrame() depthFrame = depth.getFrame() @@ -156,11 +156,11 @@ cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - # if the frame is available, draw bounding boxes on it and show the frame + # If the frame is available, draw bounding boxes on it and show the frame height = frame.shape[0] width = frame.shape[1] for detection in detections: - # denormalize bounding box + # Denormalize bounding box x1 = int(detection.xmin * width) x2 = int(detection.xmax * width) y1 = int(detection.ymin * height) diff --git a/examples/27_spatial_location_calculator.py b/examples/27_spatial_location_calculator.py index 4b46fb12e..0b696c287 100755 --- a/examples/27_spatial_location_calculator.py +++ b/examples/27_spatial_location_calculator.py @@ -59,73 +59,73 @@ spatialLocationCalculator.out.link(xoutSpatialData.input) 
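    # Two-way exchange with the host: measured spatial data streams out through
    # xoutSpatialData, while the host can send updated SpatialLocationCalculatorConfig
    # messages (new ROIs) back in through xinSpatialCalcConfig at runtime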
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig) -# Pipeline defined, now the device is assigned and pipeline is started -device = dai.Device(pipeline) -device.startPipeline() - -# Output queue will be used to get the depth frames from the outputs defined above -depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False) -spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False) -spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig") - -color = (255, 255, 255) - -print("Use WASD keys to move ROI!") - -while True: - inDepth = depthQueue.get() # blocking call, will wait until a new data has arrived - inDepthAvg = spatialCalcQueue.get() # blocking call, will wait until a new data has arrived - - depthFrame = inDepth.getFrame() - depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) - depthFrameColor = cv2.equalizeHist(depthFrameColor) - depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) - - spatialData = inDepthAvg.getSpatialLocations() - for depthData in spatialData: - roi = depthData.config.roi - roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0]) - xmin = int(roi.topLeft().x) - ymin = int(roi.topLeft().y) - xmax = int(roi.bottomRight().x) - ymax = int(roi.bottomRight().y) - - fontType = cv2.FONT_HERSHEY_TRIPLEX - cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) - cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color) - cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color) - cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color) - - - cv2.imshow("depth", depthFrameColor) - - newConfig = False - key = cv2.waitKey(1) - if key == ord('q'): - break - elif key == ord('w'): - if topLeft.y - stepSize >= 0: - topLeft.y -= stepSize - bottomRight.y -= stepSize - newConfig = True - elif key == ord('a'): - if topLeft.x - stepSize >= 0: - topLeft.x -= stepSize - bottomRight.x -= stepSize - newConfig = True - elif key == ord('s'): - if bottomRight.y + stepSize <= 1: - topLeft.y += stepSize - bottomRight.y += stepSize - newConfig = True - elif key == ord('d'): - if bottomRight.x + stepSize <= 1: - topLeft.x += stepSize - bottomRight.x += stepSize - newConfig = True - - if newConfig: - config.roi = dai.Rect(topLeft, bottomRight) - cfg = dai.SpatialLocationCalculatorConfig() - cfg.addROI(config) - spatialCalcConfigInQueue.send(cfg) \ No newline at end of file +# Pipeline is defined, now we can connect to the device +with dai.Device(pipeline) as device: + device.startPipeline() + + # Output queue will be used to get the depth frames from the outputs defined above + depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False) + spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False) + spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig") + + color = (255, 255, 255) + + print("Use WASD keys to move ROI!") + + while True: + inDepth = depthQueue.get() # Blocking call, will wait until a new data has arrived + inDepthAvg = spatialCalcQueue.get() # Blocking call, will wait until a new data has arrived + + depthFrame = inDepth.getFrame() + depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1) + depthFrameColor = 
cv2.equalizeHist(depthFrameColor) + depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) + + spatialData = inDepthAvg.getSpatialLocations() + for depthData in spatialData: + roi = depthData.config.roi + roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0]) + xmin = int(roi.topLeft().x) + ymin = int(roi.topLeft().y) + xmax = int(roi.bottomRight().x) + ymax = int(roi.bottomRight().y) + + fontType = cv2.FONT_HERSHEY_TRIPLEX + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX) + cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color) + cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color) + + + cv2.imshow("depth", depthFrameColor) + + newConfig = False + key = cv2.waitKey(1) + if key == ord('q'): + break + elif key == ord('w'): + if topLeft.y - stepSize >= 0: + topLeft.y -= stepSize + bottomRight.y -= stepSize + newConfig = True + elif key == ord('a'): + if topLeft.x - stepSize >= 0: + topLeft.x -= stepSize + bottomRight.x -= stepSize + newConfig = True + elif key == ord('s'): + if bottomRight.y + stepSize <= 1: + topLeft.y += stepSize + bottomRight.y += stepSize + newConfig = True + elif key == ord('d'): + if bottomRight.x + stepSize <= 1: + topLeft.x += stepSize + bottomRight.x += stepSize + newConfig = True + + if newConfig: + config.roi = dai.Rect(topLeft, bottomRight) + cfg = dai.SpatialLocationCalculatorConfig() + cfg.addROI(config) + spatialCalcConfigInQueue.send(cfg) \ No newline at end of file diff --git a/examples/28_camera_video_example.py b/examples/28_camera_video_example.py index e7c949853..d3f9be095 100644 --- a/examples/28_camera_video_example.py +++ b/examples/28_camera_video_example.py @@ -21,7 +21,7 @@ colorCam.video.link(xoutVideo.input) -# Pipeline defined, now the device is connected to +# Pipeline is defined, now we can connect to the device with dai.Device(pipeline) as device: # Start pipeline device.startPipeline() From 7017e77b7b1ddc33cfb199d575d3cacf1bd26d40 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 1 Apr 2021 21:09:30 +0200 Subject: [PATCH 05/36] removed 21 from the index (since we migrated the demo to 08), added examples/models into gitignore (since blobs are stored there) --- .gitignore | 3 +++ docs/source/index.rst | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index e3e51e27a..aac452dce 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,6 @@ _builds/ #ci wheelhouse/ + +# Example blobs/files +examples/models/ \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index b073c9edd..77c7fbe89 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -90,7 +90,6 @@ Now, pick a tutorial or code sample and start utilizing Gen2 capabilities samples/16_device_queue_event.rst samples/17_video_mobilenet.rst samples/18_rgb_encoding_mobilenet.rst - samples/21_mobilenet_decoding_on_device.rst samples/22_1_tiny_yolo_v3_decoding_on_device.rst samples/22_2_tiny_yolo_v4_decoding_on_device.rst samples/23_autoexposure_roi.rst From 772a3665c526d0e371a9977d1f72a8f6eb5b7611 Mon Sep 17 00:00:00 2001 From: Erik Date: Fri, 2 Apr 2021 09:53:22 +0200 Subject: [PATCH 06/36] Update README.md when I install via apt (version 1.25.0), I get an 
error "ERROR: Couldn't connect to Docker daemon at http+docker://localhost - is it running?". Running docker-compose with sudo fixes it --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 7d9d8ee94..4e28b7c77 100644 --- a/README.md +++ b/README.md @@ -16,8 +16,8 @@ Documentation is available over at [Luxonis DepthAI API](https://docs.luxonis.co ``` cd docs - docker-compose build - docker-compose up + sudo docker-compose build + sudo docker-compose up ``` Then open [http://localhost:8000](http://localhost:8000). From 4b2ff9734fbe5471392f4a3f99caef798a3880dc Mon Sep 17 00:00:00 2001 From: Erik Date: Fri, 2 Apr 2021 10:15:18 +0200 Subject: [PATCH 07/36] Update 22_1_tiny_yolo_v3_decoding_on_device.rst --- docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst index 2e4a99d67..c478fbf84 100644 --- a/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst +++ b/docs/source/samples/22_1_tiny_yolo_v3_decoding_on_device.rst @@ -21,7 +21,7 @@ Setup .. include:: /includes/install_from_pypi.rst -This example also requires MobilenetSDD blob (:code:`tiny-yolo-v3_openvino_2021.2_6shave.blob` file) to work - you can download it from +This example also requires YoloV3-tiny blob (:code:`tiny-yolo-v3_openvino_2021.2_6shave.blob` file) to work - you can download it from `here `__ Source code From 22af0495a2df0e75fb58e248813f648625c35cf2 Mon Sep 17 00:00:00 2001 From: Erik Date: Fri, 2 Apr 2021 11:13:22 +0200 Subject: [PATCH 08/36] Update README.md --- README.md | 82 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 4e28b7c77..1e9ab074b 100644 --- a/README.md +++ b/README.md @@ -10,46 +10,6 @@ Python bindings for C++ depthai-core library Documentation is available over at [Luxonis DepthAI API](https://docs.luxonis.com/projects/api/en/latest/) -### Building documentation - -- **Using [Docker](https://docs.docker.com/) (with [Docker Compose](https://docs.docker.com/compose/install/))** - - ``` - cd docs - sudo docker-compose build - sudo docker-compose up - ``` - - Then open [http://localhost:8000](http://localhost:8000). - - This docker container will watch changes in the `docs/source` directory and rebuild the docs automatically - -- **Linux** - - First, please install the required [dependencies](#Dependencies) - - Then run the following commands to build the docs website - - ``` - python3 -m pip install -U pip - python3 -m pip install -r docs/requirements.txt - cmake -S . -B build -D DEPTHAI_BUILD_DOCS=ON -D DEPTHAI_PYTHON_BUILD_DOCS=ON - cmake --build build --parallel --target sphinx - python3 -m http.server --bind 0.0.0.0 8000 --directory build/docs/sphinx - ``` - - Then open [http://localhost:8000](http://localhost:8000). 
- - This will build documentation based on current sources, so if some new changes will be made, run this command - in a new terminal window to update the website source - - ``` - cmake --build build --parallel --target sphinx - ``` - - Then refresh your page - it should load the updated website that was just built - - ## Installation Prebuilt wheels are available in [Luxonis repository](https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/) @@ -101,6 +61,48 @@ ctest - Raspbian 10; - macOS 10.14.6, 10.15.4; + +### Building documentation + +- **Using [Docker](https://docs.docker.com/) (with [Docker Compose](https://docs.docker.com/compose/install/))** + + ``` + cd docs + sudo docker-compose build + sudo docker-compose up + ``` + + You can leave out the `sudo` if you have added your user to the docker group. + + Then open [http://localhost:8000](http://localhost:8000). + + This docker container will watch changes in the `docs/source` directory and rebuild the docs automatically + +- **Linux** + + First, please install the required [dependencies](#Dependencies) + + Then run the following commands to build the docs website + + ``` + python3 -m pip install -U pip + python3 -m pip install -r docs/requirements.txt + cmake -S . -B build -D DEPTHAI_BUILD_DOCS=ON -D DEPTHAI_PYTHON_BUILD_DOCS=ON + cmake --build build --parallel --target sphinx + python3 -m http.server --bind 0.0.0.0 8000 --directory build/docs/sphinx + ``` + + Then open [http://localhost:8000](http://localhost:8000). + + This will build documentation based on current sources, so if some new changes will be made, run this command + in a new terminal window to update the website source + + ``` + cmake --build build --parallel --target sphinx + ``` + + Then refresh your page - it should load the updated website that was just built + ## Troubleshooting ### Relocation link error From b055a0aeea41a4508c06afc5034db178ffb2d2ca Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Thu, 1 Apr 2021 02:32:44 +0200 Subject: [PATCH 09/36] Improved CMake docstring options --- .github/workflows/main.yml | 2 +- CMakeLists.txt | 42 ++++++++++++++++++++++++++++++-------- cmake/docstring.hpp.in | 2 +- setup.py | 1 - 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index df44c48c7..8d31a5715 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,7 +48,7 @@ jobs: brew install libusb python -m pip install git+git://github.com/luxonis/pybind11_mkdoc.git@master - name: Configure project - run: cmake -S . -B build -DDEPTHAI_PYTHON_BUILD_DOCSTRINGS=ON -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" + run: cmake -S . 
-B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp"
       - name: Build target 'pybind11_mkdoc'
         run: cmake --build build --parallel --target pybind11_mkdoc
       - name: Upload docstring artifacts
         uses: actions/upload-artifact@v2
         with:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 97ea05105..5a298f573 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -38,7 +38,7 @@ list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/depthai-core/cmake")
 # Constants
 set(DOCSTRINGS_INCLUDE_PLACEHOLDER_DIR ${CMAKE_CURRENT_BINARY_DIR}/generated/include)
 set(DOCSTRINGS_INCLUDE_PLACEHOLDER_PATH ${DOCSTRINGS_INCLUDE_PLACEHOLDER_DIR}/docstring.hpp)
-set(DOCSTRINGS_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated/include/depthai_python_docstring.hpp)
+set(DEFAULT_DOCSTRINGS_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated/depthai_python_docstring.hpp)

 # First specify options
 option(DEPTHAI_PYTHON_USE_FIND_PACKAGE "Use find_package for depthai-core" OFF)
@@ -46,11 +46,6 @@ option(DEPTHAI_PYTHON_ENABLE_TESTS "Enable tests" OFF)
 option(DEPTHAI_PYTHON_ENABLE_EXAMPLES "Enable examples" OFF)
 option(DEPTHAI_PYTHON_BUILD_DOCS "Build documentation - see docs/requirements.txt for needed dependencies" OFF)
 option(DEPTHAI_PYTHON_BUILD_DOCSTRINGS "Generate docstrings from header files if module 'pybind11_mkdoc' available" ON)
-set(DEPTHAI_PYTHON_DOCSTRINGS_INPUT ${DOCSTRINGS_OUTPUT} CACHE FILEPATH "Path to docstring for bindings")
-set(DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT ${DOCSTRINGS_OUTPUT} CACHE FILEPATH "Path where docstring file will be generated")
-if(DEPTHAI_PYTHON_BUILD_DOCSTRINGS)
-    option(DEPTHAI_PYTHON_FORCE_DOCSTRINGS "Force that docstrings are generated, module 'pybind11_mkdoc' required" OFF)
-endif()

 # Add external dependencies
 add_subdirectory(external)
@@ -89,6 +84,35 @@ pybind11_add_module(${TARGET_NAME}
     src/log/LogBindings.cpp
 )

+
+# Docstring options
+if(DEPTHAI_PYTHON_DOCSTRINGS_INPUT AND DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT)
+    message(FATAL_ERROR "DEPTHAI_PYTHON_DOCSTRINGS_INPUT and DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT are mutually exclusive")
+endif()
+
+if(DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT)
+    # If output is specified, set both input and output to the same path
+    set(docstring_input_path ${DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT})
+    set(docstring_output_path ${DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT})
+else()
+    # If input docstrings are explicitly specified, use those and disable building
+    if(DEPTHAI_PYTHON_DOCSTRINGS_INPUT)
+        set(docstring_input_path ${DEPTHAI_PYTHON_DOCSTRINGS_INPUT})
+        message(STATUS "Disabled building of docstrings - using docstrings specified by DEPTHAI_PYTHON_DOCSTRINGS_INPUT (${DEPTHAI_PYTHON_DOCSTRINGS_INPUT})")
+        set(DEPTHAI_PYTHON_BUILD_DOCSTRINGS OFF CACHE BOOL "Generate docstrings from header files if module 'pybind11_mkdoc' available" FORCE)
+    else()
+        # Otherwise set the default location as input
+        set(docstring_input_path ${DEFAULT_DOCSTRINGS_OUTPUT})
+    endif()
+
+    # Set default output location
+    set(docstring_output_path ${DEFAULT_DOCSTRINGS_OUTPUT})
+endif()
+
+if(DEPTHAI_PYTHON_BUILD_DOCSTRINGS)
+    option(DEPTHAI_PYTHON_FORCE_DOCSTRINGS "Force that docstrings are generated, module 'pybind11_mkdoc' required" OFF)
+endif()
+
 # Configure include placeholder with INPUT path
 configure_file(cmake/docstring.hpp.in ${DOCSTRINGS_INCLUDE_PLACEHOLDER_PATH})
 # Add target to generate docstrings
@@ -96,15 +120,15 @@ if (DEPTHAI_PYTHON_BUILD_DOCSTRINGS)
     include(pybind11-mkdoc)

     # Check if pybind11_mkdoc available and create target
-    target_pybind11_mkdoc_setup(${DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT} 
depthai::core ${DEPTHAI_PYTHON_FORCE_DOCSTRINGS}) + target_pybind11_mkdoc_setup(${docstring_output_path} depthai::core ${DEPTHAI_PYTHON_FORCE_DOCSTRINGS}) if(NOT TARGET pybind11_mkdoc) # Generate default docstrings to OUTPUT path - configure_file(cmake/default_docstring.hpp.in ${DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT} COPYONLY) + configure_file(cmake/default_docstring.hpp.in ${docstring_output_path} COPYONLY) endif() else() # Generate default docstrings to OUTPUT path - configure_file(cmake/default_docstring.hpp.in ${DEPTHAI_PYTHON_DOCSTRINGS_OUTPUT} COPYONLY) + configure_file(cmake/default_docstring.hpp.in ${docstring_output_path} COPYONLY) endif() # Add include directory diff --git a/cmake/docstring.hpp.in b/cmake/docstring.hpp.in index d6df7f197..6706b0961 100644 --- a/cmake/docstring.hpp.in +++ b/cmake/docstring.hpp.in @@ -1 +1 @@ -#include "@DEPTHAI_PYTHON_DOCSTRINGS_INPUT@" +#include "@docstring_input_path@" diff --git a/setup.py b/setup.py index 9ac5f7c57..c6e885a5c 100644 --- a/setup.py +++ b/setup.py @@ -104,7 +104,6 @@ def build_extension(self, ext): # Pass a docstring option if 'DEPTHAI_PYTHON_DOCSTRINGS_INPUT' in os.environ: cmake_args += ['-DDEPTHAI_PYTHON_DOCSTRINGS_INPUT='+os.environ['DEPTHAI_PYTHON_DOCSTRINGS_INPUT']] - cmake_args += ['-DDEPTHAI_PYTHON_BUILD_DOCSTRINGS=OFF'] # Pass installation directory if 'DEPTHAI_INSTALLATION_DIR' in os.environ: From f970bfcdcc78f3f5926ff08340d71dba918a9696 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Thu, 1 Apr 2021 02:47:16 +0200 Subject: [PATCH 10/36] Addresses CMake 3.20 regression in parsing '--parallel' ('-j') option --- .github/workflows/main.yml | 6 +++--- README.md | 13 ++++++++++--- depthai-core | 2 +- docs/docker/update_docs.sh | 2 +- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 8d31a5715..9958b15b8 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -50,7 +50,7 @@ jobs: - name: Configure project run: cmake -S . -B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" - name: Build target 'pybind11_mkdoc' - run: cmake --build build --parallel --target pybind11_mkdoc + run: cmake --build build --target pybind11_mkdoc --parallel - name: Upload docstring artifacts uses: actions/upload-artifact@v2 with: @@ -247,7 +247,7 @@ jobs: - name: Build and install depthai-core run: | cmake -S depthai-core/ -B build_core -D CMAKE_BUILD_TYPE=Release -D CMAKE_TOOLCHAIN_FILE=$PWD/cmake/toolchain/pic.cmake - cmake --build build_core --parallel --target install + cmake --build build_core --target install --parallel echo "DEPTHAI_INSTALLATION_DIR=$PWD/build_core/install/" >> $GITHUB_ENV - name: Append build hash if not a tagged commit @@ -309,7 +309,7 @@ jobs: - name: Build and install depthai-core run: | cmake -S depthai-core/ -B build_core -D CMAKE_BUILD_TYPE=Release -D CMAKE_TOOLCHAIN_FILE=$PWD/cmake/toolchain/pic.cmake - cmake --build build_core --parallel --target install + cmake --build build_core --target install --parallel echo "DEPTHAI_INSTALLATION_DIR=$PWD/build_core/install/" >> $GITHUB_ENV - name: Append build hash if not a tagged commit diff --git a/README.md b/README.md index 7d9d8ee94..41ba89f3a 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Documentation is available over at [Luxonis DepthAI API](https://docs.luxonis.co python3 -m pip install -U pip python3 -m pip install -r docs/requirements.txt cmake -S . 
-B build -D DEPTHAI_BUILD_DOCS=ON -D DEPTHAI_PYTHON_BUILD_DOCS=ON - cmake --build build --parallel --target sphinx + cmake --build build --target sphinx --parallel python3 -m http.server --bind 0.0.0.0 8000 --directory build/docs/sphinx ``` @@ -44,7 +44,7 @@ Documentation is available over at [Luxonis DepthAI API](https://docs.luxonis.co in a new terminal window to update the website source ``` - cmake --build build --parallel --target sphinx + cmake --build build --target sphinx --parallel ``` Then refresh your page - it should load the updated website that was just built @@ -74,15 +74,22 @@ See: [depthai-core dependencies](https://github.com/luxonis/depthai-core#depende To build a shared library from source perform the following: ``` mkdir build && cd build -cmake .. +cmake .. [-D PYTHON_EXECUTABLE=/full/path/to/python] cmake --build . --parallel ``` +Where `-D PYTHON_EXECUTABLE` option can optionally specify an exact Python executable to use for building. + To build a wheel, execute the following ``` python3 -m pip wheel . -w wheelhouse ``` +To build and install using pip: +``` +python3 -m pip install . +``` + ## Running tests To run the tests build the library with the following options diff --git a/depthai-core b/depthai-core index 3e00d3f9f..06dcfd5da 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 3e00d3f9f1deb602d50e1464253c05c7e1e157e7 +Subproject commit 06dcfd5daab08591a58811763c8bd28c22c0ca7f diff --git a/docs/docker/update_docs.sh b/docs/docker/update_docs.sh index a2362a59b..69e4ca0d5 100755 --- a/docs/docker/update_docs.sh +++ b/docs/docker/update_docs.sh @@ -2,7 +2,7 @@ set -e -cmake --build build --parallel --target sphinx +cmake --build build --target sphinx --parallel # fix missing index.css file cp /app/docs/source/_static/css/* /app/build/docs/sphinx/_static/css From fceee8efef0d9f8621bbf24fa5fb1c13741caf84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Pi=C5=82atowski?= Date: Fri, 2 Apr 2021 16:09:24 +0200 Subject: [PATCH 11/36] Remove video tutorials pointing to gen1 --- docs/source/install.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 5563dfb70..69aa20093 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -16,14 +16,14 @@ Supported Platforms We keep up-to-date, pre-compiled, libraries for the following platforms. 
Note that the Ubuntu builds now also work unchanged on the Jetson/Xavier series:

-======================== =========================================== ================================================= ================================================================================
-Platform                 Instructions                                Tutorial                                          Support
-======================== =========================================== ================================================= ================================================================================
-Windows 10               :ref:`Platform dependencies `               `Video tutorial `__                               `Discord `__
-macOS                    :ref:`Platform dependencies `               `Video tutorial `__                               `Discord `__
-Ubuntu & Jetson/Xavier   :ref:`Platform dependencies `               `Video tutorial `__                               `Discord `__
-Raspberry Pi             :ref:`Platform dependencies `               `Video tutorial `__                               `Discord `__
-======================== =========================================== ================================================= ================================================================================
+======================== ============================================== ================================================================================
+Platform                 Instructions                                   Support
+======================== ============================================== ================================================================================
+Windows 10               :ref:`Platform dependencies `                  `Discord `__
+macOS                    :ref:`Platform dependencies `                  `Discord `__
+Ubuntu & Jetson/Xavier   :ref:`Platform dependencies `                  `Discord `__
+Raspberry Pi OS          :ref:`Platform dependencies `                  `Discord `__
+======================== ============================================== ================================================================================

 And the following platforms are also supported by a combination of the community and Luxonis.

From 459689e6af32f984adf9f543cdd2cbb40ed16446 Mon Sep 17 00:00:00 2001
From: Karolina
Date: Fri, 2 Apr 2021 23:48:12 +0200
Subject: [PATCH 12/36] 19_mono_camera_control docs

Added docs for mono camera control. Demo TBA.
---
 .../source/samples/19_mono_camera_control.rst | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 docs/source/samples/19_mono_camera_control.rst

diff --git a/docs/source/samples/19_mono_camera_control.rst b/docs/source/samples/19_mono_camera_control.rst
new file mode 100644
index 000000000..a9880a914
--- /dev/null
+++ b/docs/source/samples/19_mono_camera_control.rst
@@ -0,0 +1,22 @@
+19 - Mono Camera Control
+=========================
+
+This example shows how to control two mono cameras, set up a pipeline that outputs grayscale camera images, connects over XLink to transfer these
+to the host in real time, and displays both using OpenCV.
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+
+Source code
+###########
+
+Also `available on GitHub `__
+
+.. literalinclude:: ../../../examples/19_mono_camera_control.py
+   :language: python
+   :linenos:
+
+.. include:: /includes/footer-short.rst

From 7f2c66d70e8a28e9f4ea62e0ac437fd764c390b7 Mon Sep 17 00:00:00 2001
From: Karolina
Date: Sat, 3 Apr 2021 00:10:24 +0200
Subject: [PATCH 13/36] 20_color_rotate_warp docs

Added docs for color rotate warp. Demo TBA.
--- docs/source/samples/20_color_rotate_warp.rst | 22 ++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 docs/source/samples/20_color_rotate_warp.rst diff --git a/docs/source/samples/20_color_rotate_warp.rst b/docs/source/samples/20_color_rotate_warp.rst new file mode 100644 index 000000000..9d254bdbc --- /dev/null +++ b/docs/source/samples/20_color_rotate_warp.rst @@ -0,0 +1,22 @@ +20 - Color Rotate Warp +========================= + +This example shows usage of ImageWarp to crop a rotated rectangle area on a frame, or perform various image transforms: rotate, mirror, flip, +perspective transform. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + + +Source code +########### + +Also `available on GitHub `__ + +.. literalinclude:: ../../../examples/20_color_rotate_warp.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst From f9264c3f4b740cda1cc9655fdd576098c215e437 Mon Sep 17 00:00:00 2001 From: Erik Date: Sat, 3 Apr 2021 09:46:50 +0200 Subject: [PATCH 14/36] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1e9ab074b..074299f45 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ ctest sudo docker-compose up ``` - You can leave out the `sudo` if you have added your user to the docker group. + You can leave out the `sudo` if you have added your user to the `docker` group (or are using rootless docker). Then open [http://localhost:8000](http://localhost:8000). From 3a7b748e565267f579e5664423ecbe6bab98ceb7 Mon Sep 17 00:00:00 2001 From: Erik Date: Sat, 3 Apr 2021 11:19:02 +0200 Subject: [PATCH 15/36] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 074299f45..4a4fcd54a 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ ctest sudo docker-compose up ``` - You can leave out the `sudo` if you have added your user to the `docker` group (or are using rootless docker). + > ℹ️ You can leave out the `sudo` if you have added your user to the `docker` group (or are using rootless docker). Then open [http://localhost:8000](http://localhost:8000). From d182f7cb9fb80fa6ee9c6598636e629a0cdb6079 Mon Sep 17 00:00:00 2001 From: Karolina Date: Mon, 5 Apr 2021 12:31:48 +0200 Subject: [PATCH 16/36] title styling updated update after comments --- docs/source/samples/20_color_rotate_warp.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/samples/20_color_rotate_warp.rst b/docs/source/samples/20_color_rotate_warp.rst index 9d254bdbc..ab27a8d81 100644 --- a/docs/source/samples/20_color_rotate_warp.rst +++ b/docs/source/samples/20_color_rotate_warp.rst @@ -1,5 +1,5 @@ 20 - Color Rotate Warp -========================= +====================== This example shows usage of ImageWarp to crop a rotated rectangle area on a frame, or perform various image transforms: rotate, mirror, flip, perspective transform. 
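
The 19 and 20 docs pages added above still list their demos as "TBA". For orientation, below is a minimal sketch of the kind of rotated-rectangle crop the 20_color_rotate_warp page describes. It is not the demo itself: the node and method names (ImageManip, setCropRotatedRect) follow the Gen2 API used by the other examples in this series, while the preview size, rectangle center, size and angle are placeholder values, not the demo's actual parameters.

```python
#!/usr/bin/env python3
# Sketch only: rotated-rectangle crop with ImageManip, placeholder values throughout
import cv2
import depthai as dai

pipeline = dai.Pipeline()

camRgb = pipeline.createColorCamera()
camRgb.setPreviewSize(640, 400)  # placeholder preview size

manip = pipeline.createImageManip()
rr = dai.RotatedRect()
rr.center.x, rr.center.y = 320, 200      # placeholder center, in pixels
rr.size.width, rr.size.height = 200, 200 # placeholder crop size
rr.angle = 45                            # placeholder rotation, in degrees
manip.initialConfig.setCropRotatedRect(rr, False)  # False = coordinates are in pixels, not normalized
camRgb.preview.link(manip.inputImage)

xout = pipeline.createXLinkOut()
xout.setStreamName("manip")
manip.out.link(xout.input)

# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as device:
    device.startPipeline()
    q = device.getOutputQueue(name="manip", maxSize=4, blocking=False)
    while True:
        cv2.imshow("rotated crop", q.get().getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break
```

The other transforms the page mentions (mirror, flip, perspective) are configured on the same ImageManip node; the four-point perspective warp in particular is set through a setWarpTransformFourPoints-style call, though the exact signature should be checked against the 20_color_rotate_warp.py source once it lands.
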
From 043e732fbc18fcace091480e73e9d6f451c8e920 Mon Sep 17 00:00:00 2001 From: cafemoloko Date: Mon, 5 Apr 2021 13:57:56 +0200 Subject: [PATCH 17/36] added 19 & 20 to index --- docs/source/index.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/source/index.rst b/docs/source/index.rst index 77c7fbe89..a4e1a31e2 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -90,6 +90,8 @@ Now, pick a tutorial or code sample and start utilizing Gen2 capabilities samples/16_device_queue_event.rst samples/17_video_mobilenet.rst samples/18_rgb_encoding_mobilenet.rst + samples/19_mono_camera_control docs.rst + samples/20_color_rotate_warp.rst samples/22_1_tiny_yolo_v3_decoding_on_device.rst samples/22_2_tiny_yolo_v4_decoding_on_device.rst samples/23_autoexposure_roi.rst From 0f9c8378f1417438988990da3645362bf7d97bf7 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Mon, 5 Apr 2021 14:24:44 +0200 Subject: [PATCH 18/36] title styling fix --- docs/source/samples/19_mono_camera_control.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/samples/19_mono_camera_control.rst b/docs/source/samples/19_mono_camera_control.rst index a9880a914..e3fa7187e 100644 --- a/docs/source/samples/19_mono_camera_control.rst +++ b/docs/source/samples/19_mono_camera_control.rst @@ -1,5 +1,5 @@ 19 - Mono Camera Control -========================= +======================== This example shows how to control two mono cameras, set up a pipeline that outputs grayscale camera images, connects over XLink to transfer these to the host real-time, and displays both using OpenCV. From 2d01871eaeedfc471d62cc69b000f139170c51c0 Mon Sep 17 00:00:00 2001 From: cafemoloko Date: Mon, 5 Apr 2021 18:15:24 +0200 Subject: [PATCH 19/36] description: changed to ImageManip --- docs/source/samples/20_color_rotate_warp.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/samples/20_color_rotate_warp.rst b/docs/source/samples/20_color_rotate_warp.rst index ab27a8d81..9c2ae2a3a 100644 --- a/docs/source/samples/20_color_rotate_warp.rst +++ b/docs/source/samples/20_color_rotate_warp.rst @@ -1,7 +1,7 @@ 20 - Color Rotate Warp ====================== -This example shows usage of ImageWarp to crop a rotated rectangle area on a frame, or perform various image transforms: rotate, mirror, flip, +This example shows usage of ImageManip to crop a rotated rectangle area on a frame, or perform various image transforms: rotate, mirror, flip, perspective transform. Setup From 2a34bc98dfca1584807660f5a339c7349a2ade13 Mon Sep 17 00:00:00 2001 From: cafemoloko Date: Mon, 5 Apr 2021 19:38:35 +0200 Subject: [PATCH 20/36] 20_color_rotate_warp.py - typo corrected --- examples/20_color_rotate_warp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/20_color_rotate_warp.py b/examples/20_color_rotate_warp.py index dd8b7e085..6da521be5 100755 --- a/examples/20_color_rotate_warp.py +++ b/examples/20_color_rotate_warp.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 """ -This example shows usage of ImageWarp to crop a rotated rectangle area on a frame, +This example shows usage of ImageManip to crop a rotated rectangle area on a frame, or perform various image transforms: rotate, mirror, flip, perspective transform. 
""" From fda93247054ff863c49b315fa9f52a2f204ee1cb Mon Sep 17 00:00:00 2001 From: Karolina Date: Tue, 6 Apr 2021 19:19:03 +0200 Subject: [PATCH 21/36] 19_mono_camera_control.rst link corrected there was a typo in a link --- docs/source/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index a4e1a31e2..d9a8cd303 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -90,7 +90,7 @@ Now, pick a tutorial or code sample and start utilizing Gen2 capabilities samples/16_device_queue_event.rst samples/17_video_mobilenet.rst samples/18_rgb_encoding_mobilenet.rst - samples/19_mono_camera_control docs.rst + samples/19_mono_camera_control.rst samples/20_color_rotate_warp.rst samples/22_1_tiny_yolo_v3_decoding_on_device.rst samples/22_2_tiny_yolo_v4_decoding_on_device.rst From 75a2970f603a610eadbc345dd68a924f107a832e Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 22:46:52 +0300 Subject: [PATCH 22/36] Add simple model downloader forked from model zoo --- examples/downloader/.gitignore | 5 + examples/downloader/common.py | 675 ++++++++++++++++++++++ examples/downloader/downloader.py | 385 ++++++++++++ examples/models/.gitignore | 2 + examples/models/mobilenet-ssd/model.yml | 33 ++ examples/models/tiny-yolo/model.yml | 29 + examples/models/video-resources/model.yml | 26 + 7 files changed, 1155 insertions(+) create mode 100644 examples/downloader/.gitignore create mode 100644 examples/downloader/common.py create mode 100755 examples/downloader/downloader.py create mode 100644 examples/models/.gitignore create mode 100644 examples/models/mobilenet-ssd/model.yml create mode 100644 examples/models/tiny-yolo/model.yml create mode 100644 examples/models/video-resources/model.yml diff --git a/examples/downloader/.gitignore b/examples/downloader/.gitignore new file mode 100644 index 000000000..87158aee7 --- /dev/null +++ b/examples/downloader/.gitignore @@ -0,0 +1,5 @@ +* +!common.py +!converter.py +!downloader.py +!.gitignore \ No newline at end of file diff --git a/examples/downloader/common.py b/examples/downloader/common.py new file mode 100644 index 000000000..f49a3574b --- /dev/null +++ b/examples/downloader/common.py @@ -0,0 +1,675 @@ +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import collections +import concurrent.futures +import contextlib +import fnmatch +import json +import platform +import queue +import re +import shlex +import shutil +import signal +import subprocess +import sys +import threading +import traceback + +from pathlib import Path + +import requests +import yaml + +DOWNLOAD_TIMEOUT = 5 * 60 +OMZ_ROOT = Path(__file__).resolve().parents[1] +MODEL_ROOT = OMZ_ROOT / 'models' + +# make sure to update the documentation if you modify these +KNOWN_FRAMEWORKS = { + 'caffe': None, + 'caffe2': 'caffe2_to_onnx.py', + 'dldt': None, + 'mxnet': None, + 'onnx': None, + 'pytorch': 'pytorch_to_onnx.py', + 'tf': None, +} +KNOWN_PRECISIONS = { + 'FP16', 'FP16-INT1', 'FP16-INT8', + 'FP32', 'FP32-INT1', 'FP32-INT8', +} +KNOWN_TASK_TYPES = { + 'action_recognition', + 'classification', + 'colorization', + 'detection', + 'face_recognition', + 'feature_extraction', + 'head_pose_estimation', + 'human_pose_estimation', + 'image_inpainting', + 'image_processing', + 'image_translation', + 'instance_segmentation', + 'machine_translation', + 'monocular_depth_estimation', + 'object_attributes', + 'optical_character_recognition', + 'place_recognition', + 'question_answering', + 'semantic_segmentation', + 'sound_classification', + 'speech_recognition', + 'style_transfer', + 'token_recognition', + 'text_to_speech', +} + +KNOWN_QUANTIZED_PRECISIONS = {p + '-INT8': p for p in ['FP16', 'FP32']} +assert KNOWN_QUANTIZED_PRECISIONS.keys() <= KNOWN_PRECISIONS + +RE_MODEL_NAME = re.compile(r'[0-9a-zA-Z._-]+') +RE_SHA256SUM = re.compile(r'[0-9a-fA-F]{64}') + + +class JobContext: + def __init__(self): + self._interrupted = False + + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + raise NotImplementedError + + def printf(self, format, *args, file=sys.stdout, flush=False): + self.print(format.format(*args), file=file, flush=flush) + + def subprocess(self, args, **kwargs): + raise NotImplementedError + + def check_interrupted(self): + if self._interrupted: + raise RuntimeError("job interrupted") + + def interrupt(self): + self._interrupted = True + + @staticmethod + def _signal_message(signal_num): + # once Python 3.8 is the minimum supported version, + # signal.strsignal can be used here + + signals = type(signal.SIGINT) + + try: + signal_str = f'{signals(signal_num).name} ({signal_num})' + except ValueError: + signal_str = f'{signal_num}' + + return f'Terminated by signal {signal_str}' + +class DirectOutputContext(JobContext): + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + print(value, end=end, file=file, flush=flush) + + def subprocess(self, args, **kwargs): + return_code = subprocess.run(args, **kwargs).returncode + + if return_code < 0: + print(self._signal_message(-return_code), file=sys.stderr) + + return return_code == 0 + + +class QueuedOutputContext(JobContext): + def __init__(self, output_queue): + super().__init__() + self._output_queue = output_queue + + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + self._output_queue.put((file, value + end)) + + def subprocess(self, args, **kwargs): + with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + universal_newlines=True, **kwargs) as p: + for line in p.stdout: + self._output_queue.put((sys.stdout, line)) + return_code = p.wait() + + if return_code < 0: + self._output_queue.put((sys.stderr, self._signal_message(-return_code))) + + return return_code == 0 + +class JobWithQueuedOutput(): + def __init__(self, context, output_queue, future): + 
self._context = context + self._output_queue = output_queue + self._future = future + self._future.add_done_callback(lambda future: self._output_queue.put(None)) + + def complete(self): + for file, fragment in iter(self._output_queue.get, None): + print(fragment, end='', file=file, flush=True) # for simplicity, flush every fragment + + return self._future.result() + + def cancel(self): + self._context.interrupt() + self._future.cancel() + + +def run_in_parallel(num_jobs, f, work_items): + with concurrent.futures.ThreadPoolExecutor(num_jobs) as executor: + def start(work_item): + output_queue = queue.Queue() + context = QueuedOutputContext(output_queue) + return JobWithQueuedOutput( + context, output_queue, executor.submit(f, context, work_item)) + + jobs = list(map(start, work_items)) + + try: + return [job.complete() for job in jobs] + except BaseException: + for job in jobs: job.cancel() + raise + +EVENT_EMISSION_LOCK = threading.Lock() + +class Reporter: + GROUP_DECORATION = '#' * 16 + '||' + SECTION_DECORATION = '=' * 10 + ERROR_DECORATION = '#' * 10 + + def __init__(self, job_context, *, + enable_human_output=True, enable_json_output=False, event_context={}): + self.job_context = job_context + self.enable_human_output = enable_human_output + self.enable_json_output = enable_json_output + self.event_context = event_context + + def print_group_heading(self, format, *args): + if not self.enable_human_output: return + self.job_context.printf('{} {} {}', + self.GROUP_DECORATION, format.format(*args), self.GROUP_DECORATION[::-1]) + self.job_context.print('') + + def print_section_heading(self, format, *args): + if not self.enable_human_output: return + self.job_context.printf('{} {}', self.SECTION_DECORATION, format.format(*args), flush=True) + + def print_progress(self, format, *args): + if not self.enable_human_output: return + self.job_context.print(format.format(*args), end='\r' if sys.stdout.isatty() else '\n', flush=True) + + def end_progress(self): + if not self.enable_human_output: return + if sys.stdout.isatty(): + self.job_context.print('') + + def print(self, format='', *args, flush=False): + if not self.enable_human_output: return + self.job_context.printf(format, *args, flush=flush) + + def log_warning(self, format, *args, exc_info=False): + if exc_info: + self.job_context.print(traceback.format_exc(), file=sys.stderr, end='') + self.job_context.printf("{} Warning: {}", self.ERROR_DECORATION, format.format(*args), file=sys.stderr) + + def log_error(self, format, *args, exc_info=False): + if exc_info: + self.job_context.print(traceback.format_exc(), file=sys.stderr, end='') + self.job_context.printf("{} Error: {}", self.ERROR_DECORATION, format.format(*args), file=sys.stderr) + + def log_details(self, format, *args): + print(self.ERROR_DECORATION, ' ', format.format(*args), file=sys.stderr) + + def emit_event(self, type, **kwargs): + if not self.enable_json_output: return + + # We don't print machine-readable output through the job context, because + # we don't want it to be serialized. If we serialize it, then the consumer + # will lose information about the order of events, and we don't want that to happen. + # Instead, we emit events directly to stdout, but use a lock to ensure that + # JSON texts don't get interleaved. 
+ with EVENT_EMISSION_LOCK: + json.dump({'$type': type, **self.event_context, **kwargs}, sys.stdout, indent=None) + print() + + def with_event_context(self, **kwargs): + return Reporter( + self.job_context, + enable_human_output=self.enable_human_output, + enable_json_output=self.enable_json_output, + event_context={**self.event_context, **kwargs}, + ) + +class DeserializationError(Exception): + def __init__(self, problem, contexts=()): + super().__init__(': '.join(contexts + (problem,))) + self.problem = problem + self.contexts = contexts + +@contextlib.contextmanager +def deserialization_context(context): + try: + yield None + except DeserializationError as exc: + raise DeserializationError(exc.problem, (context,) + exc.contexts) from exc + +def validate_string(context, value): + if not isinstance(value, str): + raise DeserializationError('{}: expected a string, got {!r}'.format(context, value)) + return value + +def validate_string_enum(context, value, known_values): + str_value = validate_string(context, value) + if str_value not in known_values: + raise DeserializationError('{}: expected one of {!r}, got {!r}'.format(context, known_values, value)) + return str_value + +def validate_relative_path(context, value): + path = Path(validate_string(context, value)) + + if path.anchor or '..' in path.parts: + raise DeserializationError('{}: disallowed absolute path or parent traversal'.format(context)) + + return path + +def validate_nonnegative_int(context, value): + if not isinstance(value, int) or value < 0: + raise DeserializationError( + '{}: expected a non-negative integer, got {!r}'.format(context, value)) + return value + +class TaggedBase: + @classmethod + def deserialize(cls, value): + try: + return cls.types[value['$type']].deserialize(value) + except KeyError: + raise DeserializationError('Unknown "$type": "{}"'.format(value['$type'])) + +class FileSource(TaggedBase): + RE_CONTENT_RANGE_VALUE = re.compile(r'bytes (\d+)-\d+/(?:\d+|\*)') + + types = {} + + @classmethod + def deserialize(cls, source): + if isinstance(source, str): + source = {'$type': 'http', 'url': source} + return super().deserialize(source) + + @classmethod + def http_range_headers(cls, offset): + if offset == 0: + return {} + + return { + 'Accept-Encoding': 'identity', + 'Range': 'bytes={}-'.format(offset), + } + + @classmethod + def handle_http_response(cls, response, chunk_size): + if response.status_code == requests.codes.partial_content: + match = cls.RE_CONTENT_RANGE_VALUE.fullmatch(response.headers.get('Content-Range', '')) + if not match: + # invalid range reply; return a negative offset to make + # the download logic restart the download. 
+ return None, -1 + + return response.iter_content(chunk_size=chunk_size), int(match.group(1)) + + # either we didn't ask for a range, or the server doesn't support ranges + + if 'Content-Range' in response.headers: + # non-partial responses aren't supposed to have range information + return None, -1 + + return response.iter_content(chunk_size=chunk_size), 0 + + +class FileSourceHttp(FileSource): + def __init__(self, url): + self.url = url + + @classmethod + def deserialize(cls, source): + return cls(validate_string('"url"', source['url'])) + + def start_download(self, session, chunk_size, offset): + response = session.get(self.url, stream=True, timeout=DOWNLOAD_TIMEOUT, + headers=self.http_range_headers(offset)) + response.raise_for_status() + + return self.handle_http_response(response, chunk_size) + +FileSource.types['http'] = FileSourceHttp + +class FileSourceGoogleDrive(FileSource): + def __init__(self, id): + self.id = id + + @classmethod + def deserialize(cls, source): + return cls(validate_string('"id"', source['id'])) + + def start_download(self, session, chunk_size, offset): + range_headers = self.http_range_headers(offset) + URL = 'https://docs.google.com/uc?export=download' + response = session.get(URL, params={'id': self.id}, headers=range_headers, + stream=True, timeout=DOWNLOAD_TIMEOUT) + response.raise_for_status() + + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + params = {'id': self.id, 'confirm': value} + response = session.get(URL, params=params, headers=range_headers, + stream=True, timeout=DOWNLOAD_TIMEOUT) + response.raise_for_status() + + return self.handle_http_response(response, chunk_size) + +FileSource.types['google_drive'] = FileSourceGoogleDrive + +class ModelFile: + def __init__(self, name, size, sha256, source): + self.name = name + self.size = size + self.sha256 = sha256 + self.source = source + + @classmethod + def deserialize(cls, file): + name = validate_relative_path('"name"', file['name']) + + with deserialization_context('In file "{}"'.format(name)): + size = validate_nonnegative_int('"size"', file['size']) + + sha256 = validate_string('"sha256"', file['sha256']) + + if not RE_SHA256SUM.fullmatch(sha256): + raise DeserializationError( + '"sha256": got invalid hash {!r}'.format(sha256)) + + with deserialization_context('"source"'): + source = FileSource.deserialize(file['source']) + + return cls(name, size, sha256, source) + +class Postproc(TaggedBase): + types = {} + +class PostprocRegexReplace(Postproc): + def __init__(self, file, pattern, replacement, count): + self.file = file + self.pattern = pattern + self.replacement = replacement + self.count = count + + @classmethod + def deserialize(cls, postproc): + return cls( + validate_relative_path('"file"', postproc['file']), + re.compile(validate_string('"pattern"', postproc['pattern'])), + validate_string('"replacement"', postproc['replacement']), + validate_nonnegative_int('"count"', postproc.get('count', 0)), + ) + + def apply(self, reporter, output_dir): + postproc_file = output_dir / self.file + + reporter.print_section_heading('Replacing text in {}', postproc_file) + + postproc_file_text = postproc_file.read_text(encoding='utf-8') + + orig_file = postproc_file.with_name(postproc_file.name + '.orig') + if not orig_file.exists(): + postproc_file.replace(orig_file) + + postproc_file_text, num_replacements = self.pattern.subn( + self.replacement, postproc_file_text, count=self.count) + + if num_replacements == 0: + raise RuntimeError('Invalid pattern: no 
occurrences found') + + if self.count != 0 and num_replacements != self.count: + raise RuntimeError('Invalid pattern: expected at least {} occurrences, but only {} found'.format( + self.count, num_replacements)) + + postproc_file.write_text(postproc_file_text, encoding='utf-8') + +Postproc.types['regex_replace'] = PostprocRegexReplace + +class PostprocUnpackArchive(Postproc): + def __init__(self, file, format): + self.file = file + self.format = format + + @classmethod + def deserialize(cls, postproc): + return cls( + validate_relative_path('"file"', postproc['file']), + validate_string('"format"', postproc['format']), + ) + + def apply(self, reporter, output_dir): + postproc_file = output_dir / self.file + + reporter.print_section_heading('Unpacking {}', postproc_file) + + shutil.unpack_archive(str(postproc_file), str(postproc_file.parent), self.format) + postproc_file.unlink() # Remove the archive + +Postproc.types['unpack_archive'] = PostprocUnpackArchive + +class Model: + def __init__(self, name, subdirectory, files, postprocessing, mo_args, quantizable, framework, + description, license_url, precisions, task_type, conversion_to_onnx_args): + self.name = name + self.subdirectory = subdirectory + self.files = files + self.postprocessing = postprocessing + self.mo_args = mo_args + self.quantizable = quantizable + self.framework = framework + self.description = description + self.license_url = license_url + self.precisions = precisions + self.task_type = task_type + self.conversion_to_onnx_args = conversion_to_onnx_args + self.converter_to_onnx = KNOWN_FRAMEWORKS[framework] + + @classmethod + def deserialize(cls, model, name, subdirectory): + with deserialization_context('In model "{}"'.format(name)): + if not RE_MODEL_NAME.fullmatch(name): + raise DeserializationError('Invalid name, must consist only of letters, digits or ._-') + + files = [] + file_names = set() + + for file in model['files']: + files.append(ModelFile.deserialize(file)) + + if files[-1].name in file_names: + raise DeserializationError( + 'Duplicate file name "{}"'.format(files[-1].name)) + file_names.add(files[-1].name) + + postprocessing = [] + + for i, postproc in enumerate(model.get('postprocessing', [])): + with deserialization_context('"postprocessing" #{}'.format(i)): + postprocessing.append(Postproc.deserialize(postproc)) + + framework = validate_string_enum('"framework"', model['framework'], KNOWN_FRAMEWORKS.keys()) + + conversion_to_onnx_args = model.get('conversion_to_onnx_args', None) + if KNOWN_FRAMEWORKS[framework]: + if not conversion_to_onnx_args: + raise DeserializationError('"conversion_to_onnx_args" is absent. ' + 'Framework "{}" is supported only by conversion to ONNX.' 
+ .format(framework)) + conversion_to_onnx_args = [validate_string('"conversion_to_onnx_args" #{}'.format(i), arg) + for i, arg in enumerate(model['conversion_to_onnx_args'])] + else: + if conversion_to_onnx_args: + raise DeserializationError('Conversion to ONNX not supported for "{}" framework'.format(framework)) + + quantized = model.get('quantized', None) + if quantized is not None and quantized != 'INT8': + raise DeserializationError('"quantized": expected "INT8", got {!r}'.format(quantized)) + + if 'model_optimizer_args' in model: + mo_args = [validate_string('"model_optimizer_args" #{}'.format(i), arg) + for i, arg in enumerate(model['model_optimizer_args'])] + precisions = {f'FP16-{quantized}', f'FP32-{quantized}'} if quantized is not None else {'FP16', 'FP32'} + else: + if framework != 'dldt': + raise DeserializationError('Model not in IR format, but no conversions defined') + + mo_args = None + + files_per_precision = {} + + # for file in files: + # if len(file.name.parts) != 2: + # raise DeserializationError('Can\'t derive precision from file name {!r}'.format(file.name)) + # p = file.name.parts[0] + # if p not in KNOWN_PRECISIONS: + # raise DeserializationError( + # 'Unknown precision {!r} derived from file name {!r}, expected one of {!r}'.format( + # p, file.name, KNOWN_PRECISIONS)) + # files_per_precision.setdefault(p, set()).add(file.name.parts[1]) + + # for precision, precision_files in files_per_precision.items(): + # for ext in ['xml', 'bin']: + # if (name + '.' + ext) not in precision_files: + # raise DeserializationError('No {} file for precision "{}"'.format(ext.upper(), precision)) + + precisions = set(files_per_precision.keys()) + + quantizable = model.get('quantizable', False) + if not isinstance(quantizable, bool): + raise DeserializationError('"quantizable": expected a boolean, got {!r}'.format(quantizable)) + + description = validate_string('"description"', model['description']) + + license_url = validate_string('"license"', model['license']) + + task_type = validate_string_enum('"task_type"', model['task_type'], KNOWN_TASK_TYPES) + + return cls(name, subdirectory, files, postprocessing, mo_args, quantizable, framework, + description, license_url, precisions, task_type, conversion_to_onnx_args) + +def load_models(args): + models = [] + model_names = set() + + for config_path in sorted(MODEL_ROOT.glob('**/model.yml')): + subdirectory = config_path.parent.relative_to(MODEL_ROOT) + + with config_path.open('rb') as config_file, \ + deserialization_context('In config "{}"'.format(config_path)): + + model = yaml.safe_load(config_file) + + for bad_key in ['name', 'subdirectory']: + if bad_key in model: + raise DeserializationError('Unsupported key "{}"'.format(bad_key)) + + models.append(Model.deserialize(model, subdirectory.name, subdirectory)) + + if models[-1].name in model_names: + raise DeserializationError( + 'Duplicate model name "{}"'.format(models[-1].name)) + model_names.add(models[-1].name) + + return models + +def load_models_or_die(args): + try: + return load_models(args) + except DeserializationError as e: + indent = ' ' + + for i, context in enumerate(e.contexts): + print(indent * i + context + ':', file=sys.stderr) + print(indent * len(e.contexts) + e.problem, file=sys.stderr) + sys.exit(1) + +# requires the --print_all, --all, --name and --list arguments to be in `args` +def load_models_from_args(parser, args): + if args.print_all: + for model in load_models_or_die(args): + print(model.name) + sys.exit() + + filter_args_count = sum([args.all, args.name 
is not None, args.list is not None]) + + if filter_args_count > 1: + parser.error('at most one of "--all", "--name" or "--list" can be specified') + + if filter_args_count == 0: + parser.error('one of "--print_all", "--all", "--name" or "--list" must be specified') + + all_models = load_models_or_die(args) + + if args.all: + return all_models + elif args.name is not None or args.list is not None: + if args.name is not None: + patterns = args.name.split(',') + else: + patterns = [] + with args.list.open() as list_file: + for list_line in list_file: + tokens = shlex.split(list_line, comments=True) + if not tokens: continue + + patterns.append(tokens[0]) + # For now, ignore any other tokens in the line. + # We might use them as additional parameters later. + + models = collections.OrderedDict() # deduplicate models while preserving order + + for pattern in patterns: + matching_models = [model for model in all_models + if fnmatch.fnmatchcase(model.name, pattern)] + + if not matching_models: + sys.exit('No matching models: "{}"'.format(pattern)) + + for model in matching_models: + models[model.name] = model + + return list(models.values()) + +def quote_arg_windows(arg): + if not arg: return '""' + if not re.search(r'\s|"', arg): return arg + # On Windows, only backslashes that precede a quote or the end of the argument must be escaped. + return '"' + re.sub(r'(\\+)$', r'\1\1', re.sub(r'(\\*)"', r'\1\1\\"', arg)) + '"' + +if platform.system() == 'Windows': + quote_arg = quote_arg_windows +else: + quote_arg = shlex.quote + +def command_string(args): + return ' '.join(map(quote_arg, args)) diff --git a/examples/downloader/downloader.py b/examples/downloader/downloader.py new file mode 100755 index 000000000..dd7e84fbe --- /dev/null +++ b/examples/downloader/downloader.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python3 + +""" + Copyright (c) 2018 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +import argparse +import contextlib +import functools +import hashlib +import re +import requests +import shutil +import ssl +import sys +import tempfile +import threading +import time +import types + +from pathlib import Path + +import common + +CHUNK_SIZE = 1 << 15 if sys.stdout.isatty() else 1 << 20 + +def process_download(reporter, chunk_iterable, size, progress, file): + start_time = time.monotonic() + start_size = progress.size + + try: + for chunk in chunk_iterable: + reporter.job_context.check_interrupted() + + if chunk: + duration = time.monotonic() - start_time + progress.size += len(chunk) + progress.hasher.update(chunk) + + if duration != 0: + speed = int((progress.size - start_size) / (1024 * duration)) + else: + speed = '?' + + percent = progress.size * 100 // size + + reporter.print_progress('... 
{}%, {} KB, {} KB/s, {} seconds passed', + percent, progress.size // 1024, speed, int(duration)) + reporter.emit_event('model_file_download_progress', size=progress.size) + + file.write(chunk) + + # don't attempt to finish a file if it's bigger than expected + if progress.size > size: + break + finally: + reporter.end_progress() + +def try_download(reporter, file, num_attempts, start_download, size): + progress = types.SimpleNamespace(size=0) + + for attempt in range(num_attempts): + if attempt != 0: + retry_delay = 10 + reporter.print("Will retry in {} seconds...", retry_delay, flush=True) + time.sleep(retry_delay) + + try: + reporter.job_context.check_interrupted() + chunk_iterable, continue_offset = start_download(offset=progress.size) + + if continue_offset not in {0, progress.size}: + # Somehow we neither restarted nor continued from where we left off. + # Try to restart. + chunk_iterable, continue_offset = start_download(offset=0) + if continue_offset != 0: + reporter.log_error("Remote server refuses to send whole file, aborting") + return None + + if continue_offset == 0: + file.seek(0) + file.truncate() + progress.size = 0 + progress.hasher = hashlib.sha256() + + process_download(reporter, chunk_iterable, size, progress, file) + + if progress.size > size: + reporter.log_error("Remote file is longer than expected ({} B), download aborted", size) + # no sense in retrying - if the file is longer, there's no way it'll fix itself + return None + elif progress.size < size: + reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)", + progress.size, size) + # it's possible that we got disconnected before receiving the full file, + # so try again + else: + return progress.hasher.digest() + except (requests.exceptions.RequestException, ssl.SSLError): + reporter.log_error("Download failed", exc_info=True) + + return None + +def verify_hash(reporter, actual_hash, expected_hash, path): + if actual_hash != bytes.fromhex(expected_hash): + reporter.log_error('Hash mismatch for "{}"', path) + reporter.log_details('Expected: {}', expected_hash) + reporter.log_details('Actual: {}', actual_hash.hex()) + return False + return True + +class NullCache: + def has(self, hash): return False + def get(self, model_file, path, reporter): return False + def put(self, hash, path): pass + +class DirCache: + _FORMAT = 1 # increment if backwards-incompatible changes to the format are made + _HASH_LEN = hashlib.sha256().digest_size * 2 + + def __init__(self, cache_dir): + self._cache_dir = cache_dir / str(self._FORMAT) + self._cache_dir.mkdir(parents=True, exist_ok=True) + + self._staging_dir = self._cache_dir / 'staging' + self._staging_dir.mkdir(exist_ok=True) + + def _hash_path(self, hash): + hash = hash.lower() + assert len(hash) == self._HASH_LEN + assert re.fullmatch('[0-9a-f]+', hash) + return self._cache_dir / hash[:2] / hash[2:] + + def has(self, hash): + return self._hash_path(hash).exists() + + def get(self, model_file, path, reporter): + cache_path = self._hash_path(model_file.sha256) + cache_sha256 = hashlib.sha256() + cache_size = 0 + + with open(cache_path, 'rb') as cache_file, open(path, 'wb') as destination_file: + while True: + data = cache_file.read(CHUNK_SIZE) + if not data: + break + cache_size += len(data) + if cache_size > model_file.size: + reporter.log_error("Cached file is longer than expected ({} B), copying aborted", model_file.size) + return False + cache_sha256.update(data) + destination_file.write(data) + if cache_size < model_file.size: + reporter.log_error("Cached 
+class NullCache:
+    def has(self, hash): return False
+    def get(self, model_file, path, reporter): return False
+    def put(self, hash, path): pass
+
+class DirCache:
+    _FORMAT = 1 # increment if backwards-incompatible changes to the format are made
+
+    _HASH_LEN = hashlib.sha256().digest_size * 2
+
+    def __init__(self, cache_dir):
+        self._cache_dir = cache_dir / str(self._FORMAT)
+        self._cache_dir.mkdir(parents=True, exist_ok=True)
+
+        self._staging_dir = self._cache_dir / 'staging'
+        self._staging_dir.mkdir(exist_ok=True)
+
+    def _hash_path(self, hash):
+        hash = hash.lower()
+        assert len(hash) == self._HASH_LEN
+        assert re.fullmatch('[0-9a-f]+', hash)
+        return self._cache_dir / hash[:2] / hash[2:]
+
+    def has(self, hash):
+        return self._hash_path(hash).exists()
+
+    def get(self, model_file, path, reporter):
+        cache_path = self._hash_path(model_file.sha256)
+        cache_sha256 = hashlib.sha256()
+        cache_size = 0
+
+        with open(cache_path, 'rb') as cache_file, open(path, 'wb') as destination_file:
+            while True:
+                data = cache_file.read(CHUNK_SIZE)
+                if not data:
+                    break
+                cache_size += len(data)
+                if cache_size > model_file.size:
+                    reporter.log_error("Cached file is longer than expected ({} B), copying aborted", model_file.size)
+                    return False
+                cache_sha256.update(data)
+                destination_file.write(data)
+        if cache_size < model_file.size:
+            reporter.log_error("Cached file is shorter ({} B) than expected ({} B)", cache_size, model_file.size)
+            return False
+        return verify_hash(reporter, cache_sha256.digest(), model_file.sha256, path)
+
+    def put(self, hash, path):
+        staging_path = None
+
+        try:
+            # A file in the cache must have the hash implied by its name. So when we upload a file,
+            # we first copy it to a temporary file and then atomically move it to the desired name.
+            # This prevents interrupted runs from corrupting the cache.
+            with path.open('rb') as src_file:
+                with tempfile.NamedTemporaryFile(dir=str(self._staging_dir), delete=False) as staging_file:
+                    staging_path = Path(staging_file.name)
+                    shutil.copyfileobj(src_file, staging_file)
+
+            hash_path = self._hash_path(hash)
+            hash_path.parent.mkdir(parents=True, exist_ok=True)
+            staging_path.replace(self._hash_path(hash))
+            staging_path = None
+        finally:
+            # If we failed to complete our temporary file or to move it into place,
+            # get rid of it.
+            if staging_path:
+                staging_path.unlink()
+
+def try_retrieve_from_cache(reporter, cache, model_file, destination):
+    try:
+        if cache.has(model_file.sha256):
+            reporter.job_context.check_interrupted()
+
+            reporter.print_section_heading('Retrieving {} from the cache', destination)
+            if not cache.get(model_file, destination, reporter):
+                reporter.print('Will retry from the original source.')
+                reporter.print()
+                return False
+            reporter.print()
+            return True
+    except Exception:
+        reporter.log_warning('Cache retrieval failed; falling back to downloading', exc_info=True)
+        reporter.print()
+
+    return False
+
+def try_update_cache(reporter, cache, hash, source):
+    try:
+        cache.put(hash, source)
+    except Exception:
+        reporter.log_warning('Failed to update the cache', exc_info=True)
+
+def try_retrieve(reporter, destination, model_file, cache, num_attempts, start_download):
+    destination.parent.mkdir(parents=True, exist_ok=True)
+
+    if try_retrieve_from_cache(reporter, cache, model_file, destination):
+        return True
+
+    reporter.print_section_heading('Downloading {}', destination)
+
+    success = False
+
+    with destination.open('w+b') as f:
+        actual_hash = try_download(reporter, f, num_attempts, start_download, model_file.size)
+
+    if actual_hash and verify_hash(reporter, actual_hash, model_file.sha256, destination):
+        try_update_cache(reporter, cache, model_file.sha256, destination)
+        success = True
+
+    reporter.print()
+    return success
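Editor's note: the staging-then-rename dance in `DirCache.put` above is the standard recipe for crash-safe writes. Distilled on its own (a minimal sketch, not part of the patch), the pattern is:

.. code-block:: python

    import os
    import tempfile

    def atomic_write(path, data):
        # Stage in the destination directory so the final rename stays on one
        # filesystem, which is what makes os.replace() atomic.
        fd, staging = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
        try:
            with os.fdopen(fd, 'wb') as staging_file:
                staging_file.write(data)
            os.replace(staging, path)  # readers see either the old file or the new one
        except BaseException:
            os.unlink(staging)
            raise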
+def download_model(reporter, args, cache, session_factory, requested_precisions, model):
+    session = session_factory()
+
+    reporter.print_group_heading('Downloading {}', model.name)
+
+    reporter.emit_event('model_download_begin', model=model.name, num_files=len(model.files))
+
+    output = args.output_dir #/ model.subdirectory
+    output.mkdir(parents=True, exist_ok=True)
+
+    for model_file in model.files:
+        # if len(model_file.name.parts) == 2:
+        #     p = model_file.name.parts[0]
+        #     if p in common.KNOWN_PRECISIONS and p not in requested_precisions:
+        #         continue
+
+        model_file_reporter = reporter.with_event_context(model=model.name, model_file=model_file.name.as_posix())
+        model_file_reporter.emit_event('model_file_download_begin', size=model_file.size)
+
+        destination = output / model_file.name
+
+        if not try_retrieve(model_file_reporter, destination, model_file, cache, args.num_attempts,
+                functools.partial(model_file.source.start_download, session, CHUNK_SIZE)):
+            try:
+                destination.unlink()
+            except FileNotFoundError:
+                pass
+
+            model_file_reporter.emit_event('model_file_download_end', successful=False)
+            reporter.emit_event('model_download_end', model=model.name, successful=False)
+            return False
+
+        model_file_reporter.emit_event('model_file_download_end', successful=True)
+
+    reporter.emit_event('model_download_end', model=model.name, successful=True)
+
+    if model.postprocessing:
+        reporter.emit_event('model_postprocessing_begin', model=model.name)
+
+        for postproc in model.postprocessing:
+            postproc.apply(reporter, output)
+
+        reporter.emit_event('model_postprocessing_end', model=model.name)
+
+    reporter.print()
+
+    return True
+
+
+class DownloaderArgumentParser(argparse.ArgumentParser):
+    def error(self, message):
+        sys.stderr.write('error: %s\n' % message)
+        self.print_help()
+        sys.exit(2)
+
+def positive_int_arg(value_str):
+    try:
+        value = int(value_str)
+        if value > 0: return value
+    except ValueError:
+        pass
+
+    raise argparse.ArgumentTypeError('must be a positive integer (got {!r})'.format(value_str))
+
+
+# There is no evidence that the requests.Session class is thread-safe,
+# so for safety, we use one Session per thread. This class ensures that
+# each thread gets its own Session.
+class ThreadSessionFactory:
+    def __init__(self, exit_stack):
+        self._lock = threading.Lock()
+        self._thread_local = threading.local()
+        self._exit_stack = exit_stack
+
+    def __call__(self):
+        try:
+            session = self._thread_local.session
+        except AttributeError:
+            with self._lock: # ExitStack might not be thread-safe either
+                session = self._exit_stack.enter_context(requests.Session())
+            self._thread_local.session = session
+        return session
+
+
+def main():
+    parser = DownloaderArgumentParser()
+    parser.add_argument('--name', metavar='PAT[,PAT...]',
+        help='download only models whose names match at least one of the specified patterns')
+    parser.add_argument('--list', type=Path, metavar='FILE.LST',
+        help='download only models whose names match at least one of the patterns in the specified file')
+    parser.add_argument('--all', action='store_true', help='download all available models')
+    parser.add_argument('--print_all', action='store_true', help='print all available models')
+    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
+        help='download only models with the specified precisions (only applicable to DLDT networks)')
+    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
+        default=Path.cwd(), help='directory to save the models into')
+    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
+        help='directory to use as a cache for downloaded files')
+    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
+        help='attempt each download up to N times')
+    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
+        help='which format to use for progress reporting')
+    # unlike Model Converter, -jauto is not supported here, because CPU count has no
+    # relation to the optimal number of concurrent downloads
+    parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1,
+        help='how many downloads to perform concurrently')
+
+    args = parser.parse_args()
+
+    def make_reporter(context):
+        return common.Reporter(context,
+            enable_human_output=args.progress_format == 'text',
+            enable_json_output=args.progress_format == 'json')
+
+    reporter = make_reporter(common.DirectOutputContext())
+
+    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)
+    models = common.load_models_from_args(parser, args)
+
+    failed_models = set()
+
+    if args.precisions is None:
+        requested_precisions = 
common.KNOWN_PRECISIONS + else: + requested_precisions = set(args.precisions.split(',')) + unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS + if unknown_precisions: + sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) + + with contextlib.ExitStack() as exit_stack: + session_factory = ThreadSessionFactory(exit_stack) + if args.jobs == 1: + results = [download_model(reporter, args, cache, session_factory, requested_precisions, model) + for model in models] + else: + results = common.run_in_parallel(args.jobs, + lambda context, model: download_model( + make_reporter(context), args, cache, session_factory, requested_precisions, model), + models) + + failed_models = {model.name for model, successful in zip(models, results) if not successful} + + if failed_models: + reporter.print('FAILED:') + for failed_model_name in failed_models: + reporter.print(failed_model_name) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/examples/models/.gitignore b/examples/models/.gitignore new file mode 100644 index 000000000..2d85c3ce1 --- /dev/null +++ b/examples/models/.gitignore @@ -0,0 +1,2 @@ +*.blob +*.mp4 \ No newline at end of file diff --git a/examples/models/mobilenet-ssd/model.yml b/examples/models/mobilenet-ssd/model.yml new file mode 100644 index 000000000..bbc89411d --- /dev/null +++ b/examples/models/mobilenet-ssd/model.yml @@ -0,0 +1,33 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +description: >- + mobilenet-ssd +task_type: object_attributes +files: + - name: mobilenet-ssd_openvino_2021.2_6shave.blob + size: 14510848 + sha256: 5150d0e5d18abd0ecb21c8280e09870977358c04a7d2cfa539e1e0f6c2a93e71 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob + - name: mobilenet-ssd_openvino_2021.2_5shave.blob + size: 14531840 + sha256: c682a0f9be33ce601ce460abc580e3488ced413a7c597dfab4b74ea407d7c6d6 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob + - name: mobilenet-ssd_openvino_2021.2_8shave.blob + size: 14505024 + sha256: e0c60156ee97b01ac115ad838d13c8d90559064fec04c6d423bb03fdc40524eb + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE diff --git a/examples/models/tiny-yolo/model.yml b/examples/models/tiny-yolo/model.yml new file mode 100644 index 000000000..6f8ddb454 --- /dev/null +++ b/examples/models/tiny-yolo/model.yml @@ -0,0 +1,29 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +description: >- + tiny-yolo +task_type: object_attributes +files: + - name: tiny-yolo-v3_openvino_2021.2_6shave.blob + size: 17752512 + sha256: 274540d3010765fbe505e2ba6bb5e380c021c2c0c13d7f9d1672fd4af38b8d15 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v3_openvino_2021.2_6shave.blob + - name: tiny-yolo-v4_openvino_2021.2_6shave.blob + size: 12172416 + sha256: 984c50e229652be9c25092df487185eae29e80e4ad7964ff3632f477dbf5e851 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v4_openvino_2021.2_6shave.blob + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE diff --git a/examples/models/video-resources/model.yml b/examples/models/video-resources/model.yml new file mode 100644 index 000000000..224b3c616 --- /dev/null +++ b/examples/models/video-resources/model.yml @@ -0,0 +1,26 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +description: >- + video-resources +task_type: object_attributes +files: + - name: construction_vest.mp4 + size: 5423604 + sha256: 2f35ea35a41e98ee17dc9136c495ed0ff3aa7ba6774d5eedc2b9935350c6084f + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4 + + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE From ca3210f9f28963423a53dfb4d35fe2efce0a8c56 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 22:57:27 +0300 Subject: [PATCH 23/36] Download models on install_requirements.py --- examples/install_requirements.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index 399d49f0f..1558ce8ed 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -6,7 +6,7 @@ import find_version # 3rdparty dependencies to install -DEPENDENCIES = ['opencv-python'] +DEPENDENCIES = ['opencv-python', 'pyyaml', 'requests'] # Constants ARTIFACTORY_URL = 'https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local' @@ -56,3 +56,6 @@ if not success: print("Couldn't install dependencies as wheels and trying to compile from sources failed") print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") + +# current dir was changed to root of depthai-python +subprocess.check_call([sys.executable, "examples/downloader/downloader.py", "--all", "--cache_dir", "examples/downloader/", "-o", "examples/models"]) From 522de2417301781868bf4645a8c41236b83c90c4 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 23:02:31 +0300 Subject: [PATCH 24/36] Change to absolute examples path --- examples/install_requirements.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index 1558ce8ed..28131a3de 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 import sys, os, subprocess +examples_dir = os.path.dirname(os.path.abspath(__file__)) + parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) os.chdir(parent_dir) sys.path.insert(1, parent_dir) @@ -57,5 +59,4 @@ print("Couldn't install dependencies as wheels and trying to compile from sources failed") print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") -# current dir was changed to root of depthai-python -subprocess.check_call([sys.executable, "examples/downloader/downloader.py", "--all", "--cache_dir", "examples/downloader/", "-o", "examples/models"]) +subprocess.check_call([sys.executable, f"{examples_dir}/downloader/downloader.py", "--all", "--cache_dir", f"{examples_dir}/downloader/", "--num_attempts", "5", "-o", f"{examples_dir}/models"]) From 62b82c8f814b48ce109d3f78213281cf4965fdae Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:40:32 +0300 Subject: [PATCH 25/36] Refactor tests; use install_requirements.py script --- examples/CMakeLists.txt | 121 +++++++++++++------------------ examples/install_requirements.py | 76 +++++++++++-------- 2 files changed, 94 insertions(+), 103 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 93ff2e6b7..4308e43ed 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -12,6 +12,15 @@ if(UNIX) 
set(SYS_PATH_SEPARATOR ":") endif() +add_custom_target(install_requirements + # Python path (to find compiled module) + "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" + # Example + COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" + DEPENDS ${TARGET_NAME} + VERBATIM + COMMAND_EXPAND_LISTS +) # Macro for adding new python test macro(add_python_example example_name python_script_path) @@ -20,89 +29,59 @@ macro(add_python_example example_name python_script_path) list(REMOVE_AT arguments 0 1) # Creates a target (python my_test [args]) - add_custom_target(${example_name} - ${CMAKE_COMMAND} -E env + add_custom_target(${example_name} + ${CMAKE_COMMAND} -E env # Environment variables # Python path (to find compiled module) "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" # ASAN in case of sanitizers - "${ASAN_ENVIRONMENT_VARS}" + "${ASAN_ENVIRONMENT_VARS}" # Example ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_LIST_DIR}/${python_script_path} ${ARGN} - DEPENDS ${TARGET_NAME} + DEPENDS ${TARGET_NAME} install_requirements VERBATIM COMMAND_EXPAND_LISTS ) if(DEPTHAI_PYTHON_TEST_EXAMPLES) - + # Adds test with 5 seconds timeout and bumps all python warnings to errors - add_test(NAME ${example_name} COMMAND - ${CMAKE_COMMAND} -E env + add_test(NAME ${example_name} COMMAND + ${CMAKE_COMMAND} -E env # Python path (to find compiled module) "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" # ASAN in case of sanitizers ${ASAN_ENVIRONMENT_VARS} - ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=5 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake + ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=5 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake # Actual script to run ${PYTHON_EXECUTABLE} -Werror "${CMAKE_CURRENT_LIST_DIR}/${python_script_path}" ${arguments} ) # Sets a regex catching any logged warnings, errors or critical (coming either from device or host) - set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") + set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") endif() -endmacro() - -# Mobilenet resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob" - SHA1 "f0e14978b3f77a4f93b9f969cd39e58bb7aef490" - FILE "mobilenet-ssd_openvino_2021.2_6shave.blob" - LOCATION mobilenet_blob -) - -# Mobilenet resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob" - SHA1 "d715f85e474609cf3f696d7a2e3750804ed6c726" - FILE "mobilenet-ssd_openvino_2021.2_5shave.blob" - LOCATION mobilenet_5shave_blob -) - -# Construction vest video resource -hunter_private_data( - URL "http://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4" - SHA1 "271d8d0b702e683ce02957db7c100843de5ceaec" - FILE "construction_vest.mp4" - LOCATION construction_vest -) +endmacro() -# tiny-YoloV3 neural network resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v3_openvino_2021.2_6shave.blob" - SHA1 "f0ac263a0d55c374e1892eea21c9b7d1170bde46" - FILE "tiny-yolo-v3_openvino_2021.2_6shave.blob" - LOCATION tiny_yolo_v3_blob -) +if(DEPTHAI_PYTHON_TEST_EXAMPLES) -# tiny-YoloV4 neural network resource -hunter_private_data( - URL 
"https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v4_openvino_2021.2_6shave.blob" - SHA1 "219d949610a5760e62a8458941e1300b81c3fe4a" - FILE "tiny-yolo-v4_openvino_2021.2_6shave.blob" - LOCATION tiny_yolo_v4_blob -) + # Adds install requirements test with 5 minute timeout + add_test(NAME install_requirements COMMAND + ${CMAKE_COMMAND} -E env + # Python path (to find compiled module) + "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" + # ASAN in case of sanitizers + ${ASAN_ENVIRONMENT_VARS} + ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=300 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake + # Actual script to run + ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" + ) -# NeuralNetwork node, mobilenet example, 8 shaves -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob" - SHA1 "3329bb8f3a9c881ef9756d232055f9d6f38aa07b" - FILE "mobilenet-ssd_openvino_2021.2_8shave.blob" - LOCATION mobilenet_8shaves_blob -) + # Sets a regex catching any logged warnings, errors or critical (coming either from device or host) + set_tests_properties (install_requirements PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") +endif() # Add examples add_python_example(01_rgb_preview 01_rgb_preview.py) @@ -112,28 +91,28 @@ add_python_example(04_rgb_encoding 04_rgb_encoding.py) add_python_example(05_rgb_mono_encoding 05_rgb_mono_encoding.py) add_python_example(06_rgb_full_resolution_saver 06_rgb_full_resolution_saver.py) add_python_example(07_mono_full_resolution_saver 07_mono_full_resolution_saver.py) -add_python_example(08_rgb_mobilenet 08_rgb_mobilenet.py "${mobilenet_blob}") -add_python_example(09_mono_mobilenet 09_mono_mobilenet.py "${mobilenet_blob}") -add_python_example(10_mono_depth_mobilenetssd 10_mono_depth_mobilenetssd.py "${mobilenet_blob}") -add_python_example(11_rgb_encoding_mono_mobilenet 11_rgb_encoding_mono_mobilenet.py "${mobilenet_blob}") -add_python_example(12_rgb_encoding_mono_mobilenet_depth 12_rgb_encoding_mono_mobilenet_depth.py "${mobilenet_blob}") +add_python_example(08_rgb_mobilenet 08_rgb_mobilenet.py) +add_python_example(09_mono_mobilenet 09_mono_mobilenet.py) +add_python_example(10_mono_depth_mobilenetssd 10_mono_depth_mobilenetssd.py) +add_python_example(11_rgb_encoding_mono_mobilenet 11_rgb_encoding_mono_mobilenet.py) +add_python_example(12_rgb_encoding_mono_mobilenet_depth 12_rgb_encoding_mono_mobilenet_depth.py) add_python_example(13_encoding_max_limit 13_encoding_max_limit.py) add_python_example(14_color_camera_control 14_color_camera_control.py) -add_python_example(15_rgb_mobilenet_4k 15_rgb_mobilenet_4k.py "${mobilenet_5shave_blob}") +add_python_example(15_rgb_mobilenet_4k 15_rgb_mobilenet_4k.py) add_python_example(16_device_queue_event 16_device_queue_event.py) -add_python_example(17_video_mobilenet 17_video_mobilenet.py "${mobilenet_8shaves_blob}" "${construction_vest}") -add_python_example(18_rgb_encoding_mobilenet 18_rgb_encoding_mobilenet.py "${mobilenet_blob}") +add_python_example(17_video_mobilenet 17_video_mobilenet.py) +add_python_example(18_rgb_encoding_mobilenet 18_rgb_encoding_mobilenet.py) add_python_example(19_mono_camera_control 19_mono_camera_control.py) add_python_example(20_color_rotate_warp 20_color_rotate_warp.py) -add_python_example(21_mobilenet_device_side_decoding 21_mobilenet_device_side_decoding.py "${mobilenet_blob}") 
-add_python_example(22_1_tiny_yolo_v3_device_side_decoding 22_1_tiny_yolo_v3_device_side_decoding.py "${tiny_yolo_v3_blob}") -add_python_example(22_2_tiny_yolo_v4_device_side_decoding 22_2_tiny_yolo_v4_device_side_decoding.py "${tiny_yolo_v4_blob}") -add_python_example(23_autoexposure_roi 23_autoexposure_roi.py "${mobilenet_blob}") +add_python_example(21_mobilenet_device_side_decoding 21_mobilenet_device_side_decoding.py) +add_python_example(22_1_tiny_yolo_v3_device_side_decoding 22_1_tiny_yolo_v3_device_side_decoding.py) +add_python_example(22_2_tiny_yolo_v4_device_side_decoding 22_2_tiny_yolo_v4_device_side_decoding.py) +add_python_example(23_autoexposure_roi 23_autoexposure_roi.py) add_python_example(24_opencv_support 24_opencv_support.py) add_python_example(25_system_information 25_system_information.py) -add_python_example(26_1_spatial_mobilenet 26_1_spatial_mobilenet.py "${mobilenet_blob}") -add_python_example(26_2_spatial_mobilenet_mono 26_2_spatial_mobilenet_mono.py "${mobilenet_blob}") -add_python_example(26_3_spatial_tiny_yolo_v3 26_3_spatial_tiny_yolo.py "${tiny_yolo_v3_blob}") -add_python_example(26_3_spatial_tiny_yolo_v4 26_3_spatial_tiny_yolo.py "${tiny_yolo_v4_blob}") +add_python_example(26_1_spatial_mobilenet 26_1_spatial_mobilenet.py) +add_python_example(26_2_spatial_mobilenet_mono 26_2_spatial_mobilenet_mono.py) +add_python_example(26_3_spatial_tiny_yolo_v3 26_3_spatial_tiny_yolo.py) +add_python_example(26_3_spatial_tiny_yolo_v4 26_3_spatial_tiny_yolo.py) add_python_example(27_spatial_location_calculator 27_spatial_location_calculator.py) add_python_example(28_camera_video_example 28_camera_video_example.py) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index 28131a3de..8d01bd3c4 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -1,5 +1,11 @@ #!/usr/bin/env python3 import sys, os, subprocess +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-sdai', "--skip_depthai", action="store_true", help="Skip installation of depthai library.") +args = parser.parse_args() + examples_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -25,38 +31,44 @@ # Install opencv-python subprocess.check_call([*pip_install, *DEPENDENCIES]) -# Check if in git context and retrieve some information -git_context = True -git_commit = "" -git_branch = "" -try: - git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('UTF-8').strip() - git_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('UTF-8').strip() -except (OSError, CalledProcessError) as e: - git_context = False - -# Install depthai depending on context -if not git_context or git_branch == 'main': - # Install latest pypi depthai release - subprocess.check_call([*pip_install, '-U', '--force-reinstall', 'depthai']) -elif git_context: - # Get package version if in git context - final_version = find_version.get_package_dev_version(git_commit) - # Install latest built wheels from artifactory (0.0.0.0+[hash] or [version]+[hash]) - commands = [[*pip_install, "--extra-index-url", ARTIFACTORY_URL, "depthai=="+final_version], - [*pip_install, "."]] - success = False - for command in commands: +if not args.skip_depthai: + # Check if in git context and retrieve some information + git_context = True + git_commit = "" + git_branch = "" + try: + git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('UTF-8').strip() + 
git_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('UTF-8').strip() + except (OSError, subprocess.CalledProcessError) as e: + git_context = False + + # Install depthai depending on context + if not git_context or git_branch == 'main': + # Install latest pypi depthai release + subprocess.check_call([*pip_install, '-U', '--force-reinstall', 'depthai']) + elif git_context: try: - success = subprocess.call(command) == 0 - except (OSError, CalledProcessError) as e: - success = False - if success: - break - - # If all commands failed - if not success: - print("Couldn't install dependencies as wheels and trying to compile from sources failed") - print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") + subprocess.check_output(['git', 'submodule', 'update', '--init', '--recursive']) + except (OSError, subprocess.CalledProcessError) as e: + print("git submodule update failed!") + raise + # Get package version if in git context + final_version = find_version.get_package_dev_version(git_commit) + # Install latest built wheels from artifactory (0.0.0.0+[hash] or [version]+[hash]) + commands = [[*pip_install, "--extra-index-url", ARTIFACTORY_URL, "depthai=="+final_version], + [*pip_install, "."]] + success = False + for command in commands: + try: + success = subprocess.call(command) == 0 + except (OSError, subprocess.CalledProcessError) as e: + success = False + if success: + break + + # If all commands failed + if not success: + print("Couldn't install dependencies as wheels and trying to compile from sources failed") + print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") subprocess.check_call([sys.executable, f"{examples_dir}/downloader/downloader.py", "--all", "--cache_dir", f"{examples_dir}/downloader/", "--num_attempts", "5", "-o", f"{examples_dir}/models"]) From 86bc5193ea5b27be4be956ab86ce9a44c1eafdd8 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Sat, 3 Apr 2021 12:44:04 +0200 Subject: [PATCH 26/36] added notification when required file/s doesn't exist and prompt user to run install_requirements.py. 
Also changed the path to the video in demo 17 --- examples/08_rgb_mobilenet.py | 3 +++ examples/09_mono_mobilenet.py | 2 ++ examples/10_mono_depth_mobilenetssd.py | 2 ++ examples/11_rgb_encoding_mono_mobilenet.py | 2 ++ examples/12_rgb_encoding_mono_mobilenet_depth.py | 2 ++ examples/15_rgb_mobilenet_4k.py | 3 +++ examples/17_video_mobilenet.py | 8 ++++++-- examples/18_rgb_encoding_mobilenet.py | 2 ++ examples/22_1_tiny_yolo_v3_device_side_decoding.py | 3 +++ examples/22_2_tiny_yolo_v4_device_side_decoding.py | 3 +++ examples/23_autoexposure_roi.py | 3 +++ examples/26_1_spatial_mobilenet.py | 3 +++ examples/26_2_spatial_mobilenet_mono.py | 3 +++ examples/26_3_spatial_tiny_yolo.py | 3 +++ 14 files changed, 40 insertions(+), 2 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index f30a2a572..e18adc1f2 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -13,6 +13,9 @@ parser.add_argument('-s', '--sync', action="store_true", help="Sync RGB output with NN output", default=False) args = parser.parse_args() +if not Path(nnPathDefault).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index 63602f284..a823b33cd 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index c55ea3300..de7d986b9 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 578bbb96b..c8e636ba3 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index 0de36b0d0..34581891b 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 007745530..c9c9653ad 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -11,6 +11,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py 
b/examples/17_video_mobilenet.py index a5691ec05..7269c8c0c 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -8,12 +8,16 @@ from time import monotonic # Get argument first -nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_8shave.blob')).resolve().absolute()) -videoPath = str(Path("./construction_vest.mp4").resolve().absolute()) +parentDir = Path(__file__).parent +nnPath = str((parentDir / Path('models/mobilenet-ssd_openvino_2021.2_8shave.blob')).resolve().absolute()) +videoPath = str((parentDir / Path('models/construction_vest.mp4')).resolve().absolute()) if len(sys.argv) > 2: nnPath = sys.argv[1] videoPath = sys.argv[2] +if not Path(nnPath).exists() or not Path(videoPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index edf3be161..df5579eca 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 3dd8cbc50..af2c359df 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -38,6 +38,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index d33d95286..9dcf72c5e 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -37,6 +37,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 4ef538503..a5f9c532f 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -14,6 +14,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + previewSize = (300, 300) # Start defining a pipeline diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index 1facd9b66..851a5def5 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -23,6 +23,9 @@ if len(sys.argv) > 1: nnBlobPath = sys.argv[1] +if not Path(nnBlobPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index a6bc40f25..0b53e8679 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -26,6 +26,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): 
+ raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 2da9e706c..8ed9b282d 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -36,6 +36,9 @@ if len(sys.argv) > 1: nnBlobPath = sys.argv[1] +if not Path(nnBlobPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() From 2d1e3072762727d219cd5873cec0fce3cccaf28d Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:43:57 +0300 Subject: [PATCH 27/36] Change RuntimeError to FileNotFoundError --- examples/08_rgb_mobilenet.py | 2 +- examples/09_mono_mobilenet.py | 2 +- examples/10_mono_depth_mobilenetssd.py | 2 +- examples/11_rgb_encoding_mono_mobilenet.py | 2 +- examples/12_rgb_encoding_mono_mobilenet_depth.py | 2 +- examples/15_rgb_mobilenet_4k.py | 2 +- examples/17_video_mobilenet.py | 2 +- examples/18_rgb_encoding_mobilenet.py | 2 +- examples/22_1_tiny_yolo_v3_device_side_decoding.py | 2 +- examples/22_2_tiny_yolo_v4_device_side_decoding.py | 2 +- examples/23_autoexposure_roi.py | 2 +- examples/26_1_spatial_mobilenet.py | 2 +- examples/26_2_spatial_mobilenet_mono.py | 2 +- examples/26_3_spatial_tiny_yolo.py | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index e18adc1f2..78a8d3ad3 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -14,7 +14,7 @@ args = parser.parse_args() if not Path(nnPathDefault).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index a823b33cd..8a756fcea 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index de7d986b9..d002febb2 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index c8e636ba3..2f748d031 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = 
dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index 34581891b..8e3f3d43a 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index c9c9653ad..6ec98be59 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index 7269c8c0c..7891a4890 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -16,7 +16,7 @@ videoPath = sys.argv[2] if not Path(nnPath).exists() or not Path(videoPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index df5579eca..3581bd3a8 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index af2c359df..892c8419d 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -39,7 +39,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 9dcf72c5e..b00f02547 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -38,7 +38,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index a5f9c532f..6aaab268a 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -15,7 +15,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise 
RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") previewSize = (300, 300) diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index 851a5def5..7b768558b 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -24,7 +24,7 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 0b53e8679..3e8d5ffef 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -27,7 +27,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 8ed9b282d..cbc05a255 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -37,7 +37,7 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() From a1b889beaecc0aa3d0b045355ba2125f4ab6c0ff Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:52:17 +0300 Subject: [PATCH 28/36] Change to sys.executable from python3 to make sure the suggested requirements are for the current interpreter --- examples/08_rgb_mobilenet.py | 3 ++- examples/09_mono_mobilenet.py | 3 ++- examples/10_mono_depth_mobilenetssd.py | 3 ++- examples/11_rgb_encoding_mono_mobilenet.py | 3 ++- examples/12_rgb_encoding_mono_mobilenet_depth.py | 3 ++- examples/15_rgb_mobilenet_4k.py | 3 ++- examples/17_video_mobilenet.py | 3 ++- examples/18_rgb_encoding_mobilenet.py | 3 ++- examples/22_1_tiny_yolo_v3_device_side_decoding.py | 3 ++- examples/22_2_tiny_yolo_v4_device_side_decoding.py | 3 ++- examples/23_autoexposure_roi.py | 3 ++- examples/26_1_spatial_mobilenet.py | 3 ++- examples/26_2_spatial_mobilenet_mono.py | 3 ++- examples/26_3_spatial_tiny_yolo.py | 3 ++- 14 files changed, 28 insertions(+), 14 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index 78a8d3ad3..83c3bb40c 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -14,7 +14,8 @@ args = parser.parse_args() if not Path(nnPathDefault).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index 8a756fcea..e3be5dda6 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not 
Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index d002febb2..4b1aba93f 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 2f748d031..d2931dcb6 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index 8e3f3d43a..7acac8099 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 6ec98be59..854df5044 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index 7891a4890..ee1a9d3a5 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -16,7 +16,8 @@ videoPath = sys.argv[2] if not Path(nnPath).exists() or not Path(videoPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index 3581bd3a8..387434887 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, 
please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 892c8419d..de2ca3089 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -39,7 +39,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index b00f02547..df167b508 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -38,7 +38,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 6aaab268a..b1b9fa15e 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -15,7 +15,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') previewSize = (300, 300) diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index 7b768558b..ea268ca21 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -24,7 +24,8 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 3e8d5ffef..6c5148c86 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -27,7 +27,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index cbc05a255..fec9d179d 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -37,7 +37,8 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() From 60b42b84ddd0c73a62381de2293c3ed8bb43527c Mon Sep 
17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 22:05:54 +0300 Subject: [PATCH 29/36] Fix failing example due to warning --- examples/23_autoexposure_roi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index b1b9fa15e..730695fbe 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -10,7 +10,7 @@ # Press N to go back to the region controlled by the NN detections. # Get argument first -nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_5shave.blob')).resolve().absolute()) +nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute()) if len(sys.argv) > 1: nnPath = sys.argv[1] From c235560a0c90c8390678b3f78794c81883dbd4da Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Tue, 6 Apr 2021 22:11:07 +0300 Subject: [PATCH 30/36] Remove PYTHONPATH from install_requirements target --- examples/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 4308e43ed..f9753f6b4 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -13,9 +13,6 @@ if(UNIX) endif() add_custom_target(install_requirements - # Python path (to find compiled module) - "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" - # Example COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" DEPENDS ${TARGET_NAME} VERBATIM From 482d78177a9a42a6b1cbb7bacc2cfc9811da5918 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Tue, 6 Apr 2021 22:38:25 +0300 Subject: [PATCH 31/36] Add executable permission to python scripts --- examples/28_camera_video_example.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 examples/28_camera_video_example.py diff --git a/examples/28_camera_video_example.py b/examples/28_camera_video_example.py old mode 100644 new mode 100755 From 3b32f32bb714f08e92fde4f852d56b684e69c548 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Pi=C5=82atowski?= Date: Sat, 10 Apr 2021 11:37:53 +0200 Subject: [PATCH 32/36] Use latest version in docs styles fetch (#210) --- docs/conf.py.in | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/conf.py.in b/docs/conf.py.in index 86cabe771..0285ed7d3 100644 --- a/docs/conf.py.in +++ b/docs/conf.py.in @@ -82,11 +82,11 @@ html_static_path = ['_static'] html_favicon = '_static/images/favicon.png' html_css_files = [ 'css/index.css', - 'https://docs.luxonis.com/en/gen2/_static/css/navbar.css', + 'https://docs.luxonis.com/en/latest/_static/css/navbar.css', ] html_js_files = [ - 'https://docs.luxonis.com/en/gen2/_static/js/navbar.js', + 'https://docs.luxonis.com/en/latest/_static/js/navbar.js', ] html_title = 'DepthAI documentation | Luxonis' -intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} \ No newline at end of file +intersphinx_mapping = {'python': ('https://docs.python.org/3', None)} From 44cdb90678421803667a8017fb03c90178283b92 Mon Sep 17 00:00:00 2001 From: cafemoloko Date: Sat, 10 Apr 2021 13:08:23 +0200 Subject: [PATCH 33/36] KVM installation guide --- docs/source/install.rst | 63 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/docs/source/install.rst b/docs/source/install.rst index 69aa20093..2cd3a7cd2 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -34,6 +34,7 @@ Fedora `Di Robot Operating System 
`Discord `__
 Windows 7              :ref:`WinUSB driver `                      `Discord `__
 Docker                 :ref:`Pull and run official images `       `Discord `__
+Kernel Virtual Machine :ref:`Run on KVM `                         `Discord `__
 ====================== ===================================================== ================================================================================
 
 macOS
@@ -147,6 +148,68 @@ Run the :code:`01_rgb_preview.py` example inside a Docker container on a Linux h
 
     To allow the container to update X11 you may need to run :code:`xhost local:root` on the host.
 
+KVM
+***
+
+To access the OAK-D camera in the `Kernel Virtual Machine `__, you need to attach and detach the USB
+device on the fly whenever the host machine detects changes on the USB bus.
+
+The OAK-D camera changes its USB device type when it is booted by the DepthAI API. Natively this happens in the
+background, but when the camera is used from a virtual machine, the re-enumerated device has to be forwarded to the
+guest explicitly.
+
+On your host machine, add the following udev rules (e.g. in a new file under :code:`/etc/udev/rules.d/`):
+
+.. code-block:: bash
+
+  SUBSYSTEM=="usb", ACTION=="bind", ENV{ID_VENDOR_ID}=="03e7", MODE="0666", RUN+="/usr/local/bin/movidius_usb_hotplug.sh depthai-vm"
+  SUBSYSTEM=="usb", ACTION=="remove", ENV{PRODUCT}=="3e7/2485/1", ENV{DEVTYPE}=="usb_device", MODE="0666", RUN+="/usr/local/bin/movidius_usb_hotplug.sh depthai-vm"
+  SUBSYSTEM=="usb", ACTION=="remove", ENV{PRODUCT}=="3e7/f63b/100", ENV{DEVTYPE}=="usb_device", MODE="0666", RUN+="/usr/local/bin/movidius_usb_hotplug.sh depthai-vm"
+
+The script that the udev rules call (:code:`movidius_usb_hotplug.sh`) should then attach or detach the USB device
+to/from the virtual machine by calling the :code:`virsh` command. For example, the script could do the following:
+
+.. code-block:: bash
+
+  #!/bin/bash
+  # Abort script execution on errors
+  set -e
+  # The libvirt domain name (here: depthai-vm) is passed by the udev rule as the first argument
+  DOMAIN="$1"
+  if [ "${ACTION}" == 'bind' ]; then
+    COMMAND='attach-device'
+  elif [ "${ACTION}" == 'remove' ]; then
+    COMMAND='detach-device'
+    if [ "${PRODUCT}" == '3e7/2485/1' ]; then
+      ID_VENDOR_ID=03e7
+      ID_MODEL_ID=2485
+    fi
+    if [ "${PRODUCT}" == '3e7/f63b/100' ]; then
+      ID_VENDOR_ID=03e7
+      ID_MODEL_ID=f63b
+    fi
+  else
+    echo "Invalid udev ACTION: ${ACTION}" >&2
+    exit 1
+  fi
+  echo "Running virsh ${COMMAND} ${DOMAIN} for ${ID_VENDOR}." >&2
+  virsh "${COMMAND}" "${DOMAIN}" /dev/stdin <<END
+  <hostdev mode='subsystem' type='usb'>
+    <source>
+      <vendor id='0x${ID_VENDOR_ID}'/>
+      <product id='0x${ID_MODEL_ID}'/>
+    </source>
+  </hostdev>
+  END
+  exit 0
+
+Note that when the device is disconnected from the USB bus, some udev environment variables (:code:`ID_VENDOR_ID` and
+:code:`ID_MODEL_ID`) are no longer available, which is why the script matches on the :code:`PRODUCT` environment
+variable to identify which device has been disconnected.
+
+The virtual machine where the DepthAI API application runs should in turn define udev rules that identify the
+OAK-D camera; these rules are described `here `__.
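+
+Once both sets of rules are in place, you can check from inside the virtual machine that the camera is reachable.
+The snippet below is a minimal sketch, assuming the :code:`depthai` package is already installed in the guest; the
+exact fields exposed on the returned device objects may vary between API versions:
+
+.. code-block:: python
+
+  import depthai as dai
+
+  # Each entry is an XLink device the library can reach. With the hotplug
+  # forwarding working, the OAK-D should appear here shortly after being
+  # plugged into the host, in either its unbooted or booted state.
+  for deviceInfo in dai.Device.getAllAvailableDevices():
+      print(deviceInfo.getMxId(), deviceInfo.state)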
+
+Solution provided by `Manuel Segarra-Abad `__
+
+
 Install from PyPI
 #################

From 9fd624b579b15865b36614ea4a27c9938c46e795 Mon Sep 17 00:00:00 2001
From: Erol444
Date: Sun, 11 Apr 2021 12:35:25 +0200
Subject: [PATCH 34/36] fixed the depth/right frames, so they're flipped correctly now

---
 examples/10_mono_depth_mobilenetssd.py | 43 ++++++++++++++++----------
 1 file changed, 27 insertions(+), 16 deletions(-)

diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py
index 4b1aba93f..e05b973d5 100755
--- a/examples/10_mono_depth_mobilenetssd.py
+++ b/examples/10_mono_depth_mobilenetssd.py
@@ -6,6 +6,9 @@
 import depthai as dai
 import numpy as np
 
+
+flipRectified = True
+
 # Get argument first
 nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())
 if len(sys.argv) > 1:
@@ -91,6 +94,16 @@ def frameNorm(frame, bbox):
         normVals[::2] = frame.shape[1]
         return (np.clip(np.array(bbox), 0, 1) * normVals).astype(int)
 
+    # Add bounding boxes and text to the frame and show it to the user
+    def show(name, frame):
+        for detection in detections:
+            bbox = frameNorm(frame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
+            cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
+            cv2.putText(frame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+            cv2.putText(frame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
+        # Show the frame
+        cv2.imshow(name, frame)
+
     while True:
         # Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise
         inRight = qRight.tryGet()
@@ -99,32 +112,30 @@ def frameNorm(frame, bbox):
 
         if inRight is not None:
             rightFrame = inRight.getCvFrame()
+            if flipRectified:
+                rightFrame = cv2.flip(rightFrame, 1)
 
         if inDet is not None:
             detections = inDet.detections
+            if flipRectified:
+                for detection in detections:
+                    swap = detection.xmin
+                    detection.xmin = 1 - detection.xmax
+                    detection.xmax = 1 - swap
 
         if inDepth is not None:
             # Frame is transformed, the color map will be applied to highlight the depth info
-            depthFrame = cv2.flip(inDepth.getFrame(), 1)
             # Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html
-            depthFrame = cv2.applyColorMap(depthFrame, cv2.COLORMAP_JET)
+            depthFrame = cv2.applyColorMap(inDepth.getFrame(), cv2.COLORMAP_JET)
+
+        if depthFrame is not None:
+            show("depth", depthFrame)
 
         if rightFrame is not None:
-            for detection in detections:
-                bbox = frameNorm(rightFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
-                cv2.rectangle(rightFrame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
-                cv2.putText(rightFrame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-                cv2.putText(rightFrame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-            cv2.imshow("rectified right", rightFrame)
+            show("rectified right", rightFrame)
 
         if depthFrame is not None:
-            for detection in detections:
-                bbox = frameNorm(croppedFrame, (detection.xmin, detection.ymin, detection.xmax, detection.ymax))
-                bbox[::2] += offsetX
-                cv2.rectangle(depthFrame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255, 0, 0), 2)
-                cv2.putText(depthFrame, labelMap[detection.label], (bbox[0] + 10, bbox[1] + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-                cv2.putText(depthFrame, f"{int(detection.confidence * 100)}%", (bbox[0] + 10, bbox[1] + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255)
-            cv2.imshow("depth", depthFrame)
+            detections = []
 
         if cv2.waitKey(1) == ord('q'):
             break

From c91852e3bc6ef455a84ca528acb5db0cf15c682e Mon Sep 17 00:00:00 2001
From: SzabolcsGergely
Date: Mon, 12 Apr 2021 16:38:33 +0300
Subject: [PATCH 35/36] Resolve merge conflict in README.md

---
 README.md | 80 +++++++++++++++++++++++++++----------------------------
 1 file changed, 40 insertions(+), 40 deletions(-)

diff --git a/README.md b/README.md
index 2f1aa3f8a..ca7e019eb 100644
--- a/README.md
+++ b/README.md
@@ -10,46 +10,6 @@ Python bindings for C++ depthai-core library
 
 Documentation is available over at [Luxonis DepthAI API](https://docs.luxonis.com/projects/api/en/latest/)
 
-### Building documentation
-
-- **Using [Docker](https://docs.docker.com/) (with [Docker Compose](https://docs.docker.com/compose/install/))**
-
-  ```
-  cd docs
-  docker-compose build
-  docker-compose up
-  ```
-
-  Then open [http://localhost:8000](http://localhost:8000).
-
-  This docker container will watch changes in the `docs/source` directory and rebuild the docs automatically
-
-- **Linux**
-
-  First, please install the required [dependencies](#Dependencies)
-
-  Then run the following commands to build the docs website
-
-  ```
-  python3 -m pip install -U pip
-  python3 -m pip install -r docs/requirements.txt
-  cmake -S . -B build -D DEPTHAI_BUILD_DOCS=ON -D DEPTHAI_PYTHON_BUILD_DOCS=ON
-  cmake --build build --target sphinx --parallel
-  python3 -m http.server --bind 0.0.0.0 8000 --directory build/docs/sphinx
-  ```
-
-  Then open [http://localhost:8000](http://localhost:8000).
-
-  This will build documentation based on current sources, so if some new changes will be made, run this command
-  in a new terminal window to update the website source
-
-  ```
-  cmake --build build --target sphinx --parallel
-  ```
-
-  Then refresh your page - it should load the updated website that was just built
-
-
 ## Installation
 
 Prebuilt wheels are available in [Luxonis repository](https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/)
@@ -110,6 +70,46 @@ ctest
 - Raspbian 10;
 - macOS 10.14.6, 10.15.4;
 
+### Building documentation
+
+- **Using [Docker](https://docs.docker.com/) (with [Docker Compose](https://docs.docker.com/compose/install/))**
+
+  ```
+  cd docs
+  sudo docker-compose build
+  sudo docker-compose up
+  ```
+
+  > ℹ️ You can leave out the `sudo` if you have added your user to the `docker` group (or are using rootless docker).
+
+  Then open [http://localhost:8000](http://localhost:8000).
+
+  This docker container will watch changes in the `docs/source` directory and rebuild the docs automatically
+
+- **Linux**
+
+  First, please install the required [dependencies](#Dependencies)
+
+  Then run the following commands to build the docs website
+
+  ```
+  python3 -m pip install -U pip
+  python3 -m pip install -r docs/requirements.txt
+  cmake -S . -B build -D DEPTHAI_BUILD_DOCS=ON -D DEPTHAI_PYTHON_BUILD_DOCS=ON
+  cmake --build build --parallel --target sphinx
+  python3 -m http.server --bind 0.0.0.0 8000 --directory build/docs/sphinx
+  ```
+
+  Then open [http://localhost:8000](http://localhost:8000).
+ + This will build documentation based on current sources, so if some new changes will be made, run this command + in a new terminal window to update the website source + + ``` + cmake --build build --parallel --target sphinx + ``` + + Then refresh your page - it should load the updated website that was just built + ## Troubleshooting ### Relocation link error From 099a07bde48f73dd4fca580f97ca43a09a882134 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Mon, 12 Apr 2021 16:45:03 +0300 Subject: [PATCH 36/36] Update submodule --- README.md | 4 ++-- depthai-core | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ca7e019eb..ce8e6ad90 100644 --- a/README.md +++ b/README.md @@ -121,7 +121,7 @@ Build failure on Ubuntu 18.04 ("relocation ..." link error) with gcc 7.4.0 (defa sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 70 sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 70 ### Hunter -Hunter is a CMake-only dependency manager for C/C++ projects. +Hunter is a CMake-only dependency manager for C/C++ projects. If you are stuck with error message which mentions external libraries (subdirectory of `.hunter`) like the following: ``` @@ -145,7 +145,7 @@ del C:/[user]/.hunter ### LTO - link time optimization -If following message appears: +If following message appears: ``` lto1: internal compiler error: in add_symbol_to_partition_1, at lto/lto-partition.c:152 Please submit a full bug report, diff --git a/depthai-core b/depthai-core index 9eb4ebf1d..5a8773a96 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 9eb4ebf1da16bf68f2d84c37c631260edd374a3a +Subproject commit 5a8773a96587d6597016cfb946a375b5e68e824c