From d898c15912e9ec7a68f6a0d81821a4112ad6a099 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Wed, 14 Apr 2021 17:52:08 +0300 Subject: [PATCH 01/56] Renamed files to match with python examples and added a new example --- examples/CMakeLists.txt | 75 ++++++++++--------- ...preview_example.cpp => 01_rgb_preview.cpp} | 17 ++--- examples/src/02_mono_preview.cpp | 59 +++++++++++++++ ...oding_example.cpp => 04_h264_encoding.cpp} | 0 ...lenet_example.cpp => 08_rgb_mobilenet.cpp} | 0 ...mple.cpp => 14_1_color_camera_control.cpp} | 2 +- ..._example.cpp => 16_device_queue_event.cpp} | 0 ...rp_example.cpp => 20_image_manip_warp.cpp} | 0 ... => 21_mobilenet_device_side_decoding.cpp} | 20 ++--- ...2_1_tiny_yolo_v3_device_side_decoding.cpp} | 20 ++--- ...2_2_tiny_yolo_v4_device_side_decoding.cpp} | 20 ++--- ...port_example.cpp => 24_opencv_support.cpp} | 0 ..._example.cpp => 25_system_information.cpp} | 0 ...example.cpp => 26_1_spatial_mobilenet.cpp} | 4 +- ...le.cpp => 26_2_spatial_mobilenet_mono.cpp} | 4 +- ...example.cpp => 26_3_spatial_tiny_yolo.cpp} | 4 +- ...cpp => 27_spatial_location_calculator.cpp} | 0 ..._example.cpp => 28_1_rgb_camera_video.cpp} | 8 +- ...example.cpp => 28_2_mono_camera_video.cpp} | 0 ...er_example.cpp => 29_1_object_tracker.cpp} | 4 +- ...le.cpp => 29_2_spatial_object_tracker.cpp} | 4 +- ...xample.cpp => 30_2_stereo_depth_video.cpp} | 0 22 files changed, 151 insertions(+), 90 deletions(-) rename examples/src/{camera_preview_example.cpp => 01_rgb_preview.cpp} (74%) create mode 100644 examples/src/02_mono_preview.cpp rename examples/src/{h264_encoding_example.cpp => 04_h264_encoding.cpp} (100%) rename examples/src/{camera_mobilenet_example.cpp => 08_rgb_mobilenet.cpp} (100%) rename examples/src/{color_camera_control_example.cpp => 14_1_color_camera_control.cpp} (98%) rename examples/src/{device_queue_event_example.cpp => 16_device_queue_event.cpp} (100%) rename examples/src/{image_manip_warp_example.cpp => 20_image_manip_warp.cpp} (100%) rename examples/src/{mobilenet_device_side_decoding_example.cpp => 21_mobilenet_device_side_decoding.cpp} (87%) rename examples/src/{tiny_yolo_v3_device_side_decoding_example.cpp => 22_1_tiny_yolo_v3_device_side_decoding.cpp} (90%) rename examples/src/{tiny_yolo_v4_device_side_decoding_example.cpp => 22_2_tiny_yolo_v4_device_side_decoding.cpp} (90%) rename examples/src/{opencv_support_example.cpp => 24_opencv_support.cpp} (100%) rename examples/src/{system_information_example.cpp => 25_system_information.cpp} (100%) rename examples/src/{spatial_mobilenet_example.cpp => 26_1_spatial_mobilenet.cpp} (98%) rename examples/src/{spatial_mobilenet_mono_example.cpp => 26_2_spatial_mobilenet_mono.cpp} (98%) rename examples/src/{spatial_tiny_yolo_example.cpp => 26_3_spatial_tiny_yolo.cpp} (98%) rename examples/src/{spatial_location_calculator_example.cpp => 27_spatial_location_calculator.cpp} (100%) rename examples/src/{camera_video_example.cpp => 28_1_rgb_camera_video.cpp} (94%) rename examples/src/{mono_camera_example.cpp => 28_2_mono_camera_video.cpp} (100%) rename examples/src/{object_tracker_example.cpp => 29_1_object_tracker.cpp} (98%) rename examples/src/{spatial_object_tracker_example.cpp => 29_2_spatial_object_tracker.cpp} (98%) rename examples/src/{stereo_example.cpp => 30_2_stereo_depth_video.cpp} (100%) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 794832f56..f30d9bb04 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -19,7 +19,7 @@ macro(dai_add_example example_name example_src) # parse the rest 
of the arguments set(arguments ${ARGV}) list(REMOVE_AT arguments 0 1) - + # If 'DEPTHAI_TEST_EXAMPLES' is ON, then examples will be part of the test suite if(DEPTHAI_TEST_EXAMPLES) add_test(NAME ${example_name} COMMAND ${CMAKE_COMMAND} @@ -28,8 +28,8 @@ macro(dai_add_example example_name example_src) ) # Sets a regex catching any logged warnings, errors or critical (coming either from device or host) - set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") - + set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") + # Add sanitizers for tests as well add_sanitizers(${example_name}) @@ -37,7 +37,7 @@ macro(dai_add_example example_name example_src) set_tests_properties(${example_name} PROPERTIES ENVIRONMENT "UBSAN_OPTIONS=halt_on_error=1") endif() -endmacro() +endmacro() # Create a custom target which runs all examples for 10 seconds max, and check if they executed without errors @@ -58,13 +58,16 @@ endmacro() ## message(STATUS "Location of test1.data: ${test1_data}") # Color camera preview output example -dai_add_example(camera_preview src/camera_preview_example.cpp) +dai_add_example(01_rgb_preview src/01_rgb_preview.cpp) + +# Mono camera preview output example +dai_add_example(02_mono_preview src/02_mono_preview.cpp) # Color camera video output example -dai_add_example(camera_video src/camera_video_example.cpp) +dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) # Mono camera video output example -dai_add_example(mono_camera src/mono_camera_example.cpp) +dai_add_example(28_2_mono_camera_video src/28_2_mono_camera_video.cpp) # NeuralNetwork node, mobilenet example hunter_private_data( @@ -98,8 +101,8 @@ hunter_private_data( LOCATION mobilenet_8shaves_blob ) -dai_add_example(camera_mobilenet src/camera_mobilenet_example.cpp) -target_compile_definitions(camera_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(08_rgb_mobilenet src/08_rgb_mobilenet.cpp) +target_compile_definitions(08_rgb_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") # NeuralNetwork node, webcam input (from host) dai_add_example(webcam_mobilenet src/webcam_mobilenet_example.cpp) @@ -109,66 +112,66 @@ target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shav dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) # h264 encoding -dai_add_example(h264_encoding src/h264_encoding_example.cpp) +dai_add_example(04_h264_encoding src/04_h264_encoding.cpp) -# StereoDepth example -dai_add_example(stereo src/stereo_example.cpp) +# Stereo Depth example +dai_add_example(30_2_stereo_depth_video src/30_2_stereo_depth_video.cpp) # Image Manip node examples dai_add_example(image_manip src/image_manip_example.cpp) -dai_add_example(image_manip_warp src/image_manip_warp_example.cpp) +dai_add_example(20_image_manip_warp src/20_image_manip_warp.cpp) # Color Camera config example -dai_add_example(color_camera_control src/color_camera_control_example.cpp) +dai_add_example(14_1_color_camera_control src/14_1_color_camera_control.cpp) # System information example -dai_add_example(system_information src/system_information_example.cpp) +dai_add_example(25_system_information src/25_system_information.cpp) # Device getQueueEvent example -dai_add_example(device_queue_event src/device_queue_event_example.cpp) +dai_add_example(16_device_queue_event src/16_device_queue_event.cpp) # OpenCV support example -dai_add_example(opencv_support src/opencv_support_example.cpp) 
+dai_add_example(24_opencv_support src/24_opencv_support.cpp) # Device side decoding example for mobilenet-ssd -dai_add_example(mobilenet_device_side_decoding src/mobilenet_device_side_decoding_example.cpp) -target_compile_definitions(mobilenet_device_side_decoding PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(21_mobilenet_device_side_decoding src/21_mobilenet_device_side_decoding.cpp) +target_compile_definitions(21_mobilenet_device_side_decoding PRIVATE BLOB_PATH="${mobilenet_blob}") # Device side decoding example for mobilenet-ssd with 3d coordinates on RGB camera -dai_add_example(spatial_mobilenet src/spatial_mobilenet_example.cpp) -target_compile_definitions(spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(26_1_spatial_mobilenet src/26_1_spatial_mobilenet.cpp) +target_compile_definitions(26_1_spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") # Device side decoding example for mobilenet-ssd with 3d coordinates on right camera -dai_add_example(spatial_mobilenet_mono src/spatial_mobilenet_mono_example.cpp) -target_compile_definitions(spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(26_2_spatial_mobilenet_mono src/26_2_spatial_mobilenet_mono.cpp) +target_compile_definitions(26_2_spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") # Device side decoding example for tiny-yolo-v3 -dai_add_example(tiny_yolo_v3_device_side_decoding src/tiny_yolo_v3_device_side_decoding_example.cpp) -target_compile_definitions(tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") +dai_add_example(22_1_tiny_yolo_v3_device_side_decoding src/22_1_tiny_yolo_v3_device_side_decoding.cpp) +target_compile_definitions(22_1_tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") # Device side decoding example for tiny-yolo-v4 -dai_add_example(tiny_yolo_v4_device_side_decoding src/tiny_yolo_v4_device_side_decoding_example.cpp) -target_compile_definitions(tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") +dai_add_example(22_2_tiny_yolo_v4_device_side_decoding src/22_2_tiny_yolo_v4_device_side_decoding.cpp) +target_compile_definitions(22_2_tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") # Sync example between NN and camera frames dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) target_compile_definitions(camera_mobilenet_sync PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(spatial_location_calculator src/spatial_location_calculator_example.cpp) +dai_add_example(27_spatial_location_calculator src/27_spatial_location_calculator.cpp) # Device side decoding example for tiny-yolo-v3 with 3d coordinates on RGB camera -dai_add_example(spatial_tiny_yolo_v3 src/spatial_tiny_yolo_example.cpp) -target_compile_definitions(spatial_tiny_yolo_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") +dai_add_example(26_3_spatial_tiny_yolo_v3 src/26_3_spatial_tiny_yolo.cpp) +target_compile_definitions(26_3_spatial_tiny_yolo_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") # Device side decoding example for tiny-yolo-v4 with 3d coordinates on RGB camera -dai_add_example(spatial_tiny_yolo_v4 src/spatial_tiny_yolo_example.cpp) -target_compile_definitions(spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") +dai_add_example(26_4_spatial_tiny_yolo_v4 src/26_3_spatial_tiny_yolo.cpp) +target_compile_definitions(26_4_spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") # Object tracker example on mobilenet SSD output 
-dai_add_example(object_tracker_example src/object_tracker_example.cpp) -target_compile_definitions(object_tracker_example PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(29_1_object_tracker src/29_1_object_tracker.cpp) +target_compile_definitions(29_1_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") # Spatial Object tracker example on mobilenet SSD output -dai_add_example(spatial_object_tracker_example src/spatial_object_tracker_example.cpp) -target_compile_definitions(spatial_object_tracker_example PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(29_2_spatial_object_tracker src/29_2_spatial_object_tracker.cpp) +target_compile_definitions(29_2_spatial_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/camera_preview_example.cpp b/examples/src/01_rgb_preview.cpp similarity index 74% rename from examples/src/camera_preview_example.cpp rename to examples/src/01_rgb_preview.cpp index fdc33aa9c..8e0509fa0 100644 --- a/examples/src/camera_preview_example.cpp +++ b/examples/src/01_rgb_preview.cpp @@ -7,19 +7,18 @@ // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -dai::Pipeline createCameraPipeline() { +dai::Pipeline createPipeline() { dai::Pipeline p; - auto colorCam = p.create(); + auto camRgb = p.create(); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(true); + auto xlinkOut = p.create(); xlinkOut->setStreamName("preview"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - // Link plugins CAM -> XLINK - colorCam->preview.link(xlinkOut->input); + camRgb->preview.link(xlinkOut->input); return p; } @@ -27,7 +26,7 @@ dai::Pipeline createCameraPipeline() { int main() { using namespace std; - dai::Pipeline p = createCameraPipeline(); + dai::Pipeline p = createPipeline(); dai::Device d(p); d.startPipeline(); diff --git a/examples/src/02_mono_preview.cpp b/examples/src/02_mono_preview.cpp new file mode 100644 index 000000000..189d97c45 --- /dev/null +++ b/examples/src/02_mono_preview.cpp @@ -0,0 +1,59 @@ + +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +dai::Pipeline createPipeline() { + dai::Pipeline p; + + auto camLeft = p.create(); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + + auto camRight = p.create(); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + + auto xoutLeft = p.create(); + xoutLeft->setStreamName("left"); + auto xoutRight = p.create(); + xoutRight->setStreamName("right"); + + // Link plugins CAM -> XLINK + camLeft->out.link(xoutLeft->input); + camRight->out.link(xoutRight->input); + + return p; +} + +int main() { + using namespace std; + + dai::Pipeline p = createPipeline(); + dai::Device d(p); + d.startPipeline(); + + cv::Mat frame; + auto leftQueue = d.getOutputQueue("left"); + auto rightQueue = d.getOutputQueue("right"); + + while(1) { + auto left = leftQueue->get(); + cv::Mat LeftFrame = left->getFrame(); + cv::imshow("left", LeftFrame); + auto right = rightQueue->get(); + cv::Mat RightFrame = right->getFrame(); + cv::imshow("right", RightFrame); + + int key = cv::waitKey(1); + 
if(key == 'q') { + return 0; + } + } + + return 0; +} diff --git a/examples/src/h264_encoding_example.cpp b/examples/src/04_h264_encoding.cpp similarity index 100% rename from examples/src/h264_encoding_example.cpp rename to examples/src/04_h264_encoding.cpp diff --git a/examples/src/camera_mobilenet_example.cpp b/examples/src/08_rgb_mobilenet.cpp similarity index 100% rename from examples/src/camera_mobilenet_example.cpp rename to examples/src/08_rgb_mobilenet.cpp diff --git a/examples/src/color_camera_control_example.cpp b/examples/src/14_1_color_camera_control.cpp similarity index 98% rename from examples/src/color_camera_control_example.cpp rename to examples/src/14_1_color_camera_control.cpp index 56715a033..5bf31c3b8 100644 --- a/examples/src/color_camera_control_example.cpp +++ b/examples/src/14_1_color_camera_control.cpp @@ -159,7 +159,7 @@ int main() { if(key == 'l') sens_iso += ISO_STEP; exp_time = clamp(exp_time, exp_min, exp_max); sens_iso = clamp(sens_iso, sens_min, sens_max); - printf("Setting manual exposure, time %d us, iso %d\n", exp_time, sens_iso); + printf("Setting manual exposure, time: %d, iso: %d\n", exp_time, sens_iso); dai::CameraControl ctrl; ctrl.setManualExposure(exp_time, sens_iso); controlQueue->send(ctrl); diff --git a/examples/src/device_queue_event_example.cpp b/examples/src/16_device_queue_event.cpp similarity index 100% rename from examples/src/device_queue_event_example.cpp rename to examples/src/16_device_queue_event.cpp diff --git a/examples/src/image_manip_warp_example.cpp b/examples/src/20_image_manip_warp.cpp similarity index 100% rename from examples/src/image_manip_warp_example.cpp rename to examples/src/20_image_manip_warp.cpp diff --git a/examples/src/mobilenet_device_side_decoding_example.cpp b/examples/src/21_mobilenet_device_side_decoding.cpp similarity index 87% rename from examples/src/mobilenet_device_side_decoding_example.cpp rename to examples/src/21_mobilenet_device_side_decoding.cpp index 1794cb5ac..828d0a8a3 100644 --- a/examples/src/mobilenet_device_side_decoding_example.cpp +++ b/examples/src/21_mobilenet_device_side_decoding.cpp @@ -13,10 +13,10 @@ static const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; - auto colorCam = p.create(); + auto camRgb = p.create(); auto xlinkOut = p.create(); auto detectionNetwork = p.create(); auto nnOut = p.create(); @@ -24,22 +24,22 @@ dai::Pipeline createNNPipeline(std::string nnPath) { xlinkOut->setStreamName("preview"); nnOut->setStreamName("detections"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); // testing MobileNet DetectionNetwork detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); + camRgb->preview.link(detectionNetwork->input); if(syncNN) { detectionNetwork->passthrough.link(xlinkOut->input); } else { - colorCam->preview.link(xlinkOut->input); + 
camRgb->preview.link(xlinkOut->input); } detectionNetwork->out.link(nnOut->input); @@ -61,7 +61,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/tiny_yolo_v3_device_side_decoding_example.cpp b/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp similarity index 90% rename from examples/src/tiny_yolo_v3_device_side_decoding_example.cpp rename to examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp index 6df7ac9e1..d56262234 100644 --- a/examples/src/tiny_yolo_v3_device_side_decoding_example.cpp +++ b/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp @@ -20,21 +20,21 @@ static const std::vector labelMap = { static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; - auto colorCam = p.create(); + auto camRgb = p.create(); auto xlinkOut = p.create(); auto nnOut = p.create(); xlinkOut->setStreamName("preview"); nnOut->setStreamName("detections"); - colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); auto detectionNetwork = p.create(); @@ -48,11 +48,11 @@ dai::Pipeline createNNPipeline(std::string nnPath) { detectionNetwork->setBlobPath(nnPath); // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); + camRgb->preview.link(detectionNetwork->input); if(syncNN) { detectionNetwork->passthrough.link(xlinkOut->input); } else { - colorCam->preview.link(xlinkOut->input); + camRgb->preview.link(xlinkOut->input); } detectionNetwork->out.link(nnOut->input); @@ -74,7 +74,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/tiny_yolo_v4_device_side_decoding_example.cpp b/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp similarity index 90% rename from examples/src/tiny_yolo_v4_device_side_decoding_example.cpp rename to examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp index 211f196a6..c34976c01 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding_example.cpp +++ b/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp @@ -25,21 +25,21 @@ static const std::vector labelMap = { static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; - auto colorCam = p.create(); + auto camRgb = p.create(); auto xlinkOut = p.create(); auto nnOut = p.create(); xlinkOut->setStreamName("preview"); nnOut->setStreamName("detections"); - colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - 
colorCam->setFps(40); + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); auto detectionNetwork = p.create(); @@ -53,11 +53,11 @@ dai::Pipeline createNNPipeline(std::string nnPath) { detectionNetwork->setBlobPath(nnPath); // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); + camRgb->preview.link(detectionNetwork->input); if(syncNN) { detectionNetwork->passthrough.link(xlinkOut->input); } else { - colorCam->preview.link(xlinkOut->input); + camRgb->preview.link(xlinkOut->input); } detectionNetwork->out.link(nnOut->input); @@ -79,7 +79,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/opencv_support_example.cpp b/examples/src/24_opencv_support.cpp similarity index 100% rename from examples/src/opencv_support_example.cpp rename to examples/src/24_opencv_support.cpp diff --git a/examples/src/system_information_example.cpp b/examples/src/25_system_information.cpp similarity index 100% rename from examples/src/system_information_example.cpp rename to examples/src/25_system_information.cpp diff --git a/examples/src/spatial_mobilenet_example.cpp b/examples/src/26_1_spatial_mobilenet.cpp similarity index 98% rename from examples/src/spatial_mobilenet_example.cpp rename to examples/src/26_1_spatial_mobilenet.cpp index 338c5bc1e..a81cc47af 100644 --- a/examples/src/spatial_mobilenet_example.cpp +++ b/examples/src/26_1_spatial_mobilenet.cpp @@ -13,7 +13,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; // create nodes @@ -89,7 +89,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/spatial_mobilenet_mono_example.cpp b/examples/src/26_2_spatial_mobilenet_mono.cpp similarity index 98% rename from examples/src/spatial_mobilenet_mono_example.cpp rename to examples/src/26_2_spatial_mobilenet_mono.cpp index 9cbd58025..986dd5944 100644 --- a/examples/src/spatial_mobilenet_mono_example.cpp +++ b/examples/src/26_2_spatial_mobilenet_mono.cpp @@ -14,7 +14,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; static bool flipRectified = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; auto xlinkOut = p.create(); @@ -91,7 +91,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/spatial_tiny_yolo_example.cpp b/examples/src/26_3_spatial_tiny_yolo.cpp similarity index 98% rename from examples/src/spatial_tiny_yolo_example.cpp rename to examples/src/26_3_spatial_tiny_yolo.cpp index 
19a67a602..3360276d4 100644 --- a/examples/src/spatial_tiny_yolo_example.cpp +++ b/examples/src/26_3_spatial_tiny_yolo.cpp @@ -20,7 +20,7 @@ static const std::vector labelMap = { static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; // create nodes @@ -103,7 +103,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/spatial_location_calculator_example.cpp b/examples/src/27_spatial_location_calculator.cpp similarity index 100% rename from examples/src/spatial_location_calculator_example.cpp rename to examples/src/27_spatial_location_calculator.cpp diff --git a/examples/src/camera_video_example.cpp b/examples/src/28_1_rgb_camera_video.cpp similarity index 94% rename from examples/src/camera_video_example.cpp rename to examples/src/28_1_rgb_camera_video.cpp index 9595579b0..cecb6a6fb 100644 --- a/examples/src/camera_video_example.cpp +++ b/examples/src/28_1_rgb_camera_video.cpp @@ -7,17 +7,17 @@ // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -dai::Pipeline createCameraFullPipeline() { +dai::Pipeline createPipeline() { dai::Pipeline p; auto colorCam = p.create(); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("video"); colorCam->setPreviewSize(300, 300); colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); colorCam->setInterleaved(true); + auto xlinkOut = p.create(); + xlinkOut->setStreamName("video"); // Link plugins CAM -> XLINK colorCam->video.link(xlinkOut->input); @@ -29,7 +29,7 @@ int main() { using namespace std::chrono; // Create pipeline - dai::Pipeline p = createCameraFullPipeline(); + dai::Pipeline p = createPipeline(); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/mono_camera_example.cpp b/examples/src/28_2_mono_camera_video.cpp similarity index 100% rename from examples/src/mono_camera_example.cpp rename to examples/src/28_2_mono_camera_video.cpp diff --git a/examples/src/object_tracker_example.cpp b/examples/src/29_1_object_tracker.cpp similarity index 98% rename from examples/src/object_tracker_example.cpp rename to examples/src/29_1_object_tracker.cpp index 30ef1bfb8..e55403d89 100644 --- a/examples/src/object_tracker_example.cpp +++ b/examples/src/29_1_object_tracker.cpp @@ -13,7 +13,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; auto colorCam = p.create(); @@ -69,7 +69,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/spatial_object_tracker_example.cpp b/examples/src/29_2_spatial_object_tracker.cpp similarity index 98% rename from examples/src/spatial_object_tracker_example.cpp rename to examples/src/29_2_spatial_object_tracker.cpp index 2692e2809..d3dc2d937 100644 --- a/examples/src/spatial_object_tracker_example.cpp +++ b/examples/src/29_2_spatial_object_tracker.cpp @@ -13,7 +13,7 @@ static 
const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; -dai::Pipeline createNNPipeline(std::string nnPath) { +dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; // create nodes @@ -92,7 +92,7 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline p = createPipeline(nnPath); // Connect to device with above created pipeline dai::Device d(p); diff --git a/examples/src/stereo_example.cpp b/examples/src/30_2_stereo_depth_video.cpp similarity index 100% rename from examples/src/stereo_example.cpp rename to examples/src/30_2_stereo_depth_video.cpp From 01abdd7409b6cbfeeaa35bbdbea40532ab89e5d5 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Wed, 14 Apr 2021 18:44:07 +0300 Subject: [PATCH 02/56] Add depth preview --- examples/CMakeLists.txt | 3 ++ examples/src/02_mono_preview.cpp | 8 ++-- examples/src/03_depth_preview.cpp | 61 +++++++++++++++++++++++++++++++ 3 files changed, 68 insertions(+), 4 deletions(-) create mode 100644 examples/src/03_depth_preview.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index f30d9bb04..a4bc0373d 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -63,6 +63,9 @@ dai_add_example(01_rgb_preview src/01_rgb_preview.cpp) # Mono camera preview output example dai_add_example(02_mono_preview src/02_mono_preview.cpp) +# Depth preview output example +dai_add_example(03_depth_preview src/03_depth_preview.cpp) + # Color camera video output example dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) diff --git a/examples/src/02_mono_preview.cpp b/examples/src/02_mono_preview.cpp index 189d97c45..a209bc94f 100644 --- a/examples/src/02_mono_preview.cpp +++ b/examples/src/02_mono_preview.cpp @@ -11,16 +11,16 @@ dai::Pipeline createPipeline() { dai::Pipeline p; auto camLeft = p.create(); + auto camRight = p.create(); + auto xoutLeft = p.create(); + auto xoutRight = p.create(); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - - auto camRight = p.create(); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - auto xoutLeft = p.create(); xoutLeft->setStreamName("left"); - auto xoutRight = p.create(); xoutRight->setStreamName("right"); // Link plugins CAM -> XLINK diff --git a/examples/src/03_depth_preview.cpp b/examples/src/03_depth_preview.cpp new file mode 100644 index 000000000..588a4c161 --- /dev/null +++ b/examples/src/03_depth_preview.cpp @@ -0,0 +1,61 @@ + +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +dai::Pipeline createPipeline() { + dai::Pipeline p; + + auto camLeft = p.create(); + auto camRight = p.create(); + auto xout = p.create(); + auto depth = p.create(); + + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + + depth->setConfidenceThreshold(200); + //depth->setMedianFilter(median); + + // Link plugins CAM -> XLINK + camLeft->out.link(depth->left); + camRight->out.link(depth->right); + + xout->setStreamName("disparity"); + 
depth->disparity.link(xout->input); + + return p; +} + +int main() { + using namespace std; + + dai::Pipeline p = createPipeline(); + dai::Device d(p); + d.startPipeline(); + + auto dispQueue = d.getOutputQueue("disparity", 4, false); + + while(1) { + auto disparity = dispQueue->get<dai::ImgFrame>(); + cv::Mat disp(disparity->getHeight(), disparity->getWidth(), CV_8UC1, disparity->getData().data()); + disp.convertTo(disp, CV_8UC1, 3.0); + cv::imshow("disparity", disp); + cv::Mat disp_color; + cv::applyColorMap(disp, disp_color, cv::COLORMAP_JET); + cv::imshow("disparity_color", disp_color); + + int key = cv::waitKey(1); + if(key == 'q') { + return 0; + } + } + + return 0; +} From 8b511e83a388ea92e58941cc919f19bad8a4e1c6 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 15 Apr 2021 22:43:28 +0300 Subject: [PATCH 03/56] Changed 04 to match with python --- examples/CMakeLists.txt | 4 +- examples/src/04_rgb_encoding.cpp | 68 ++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 2 deletions(-) create mode 100644 examples/src/04_rgb_encoding.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index a4bc0373d..28ac22424 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -114,8 +114,8 @@ target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shav # MJPEG encoding dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) -# h264 encoding -dai_add_example(04_h264_encoding src/04_h264_encoding.cpp) +# rgb encoding +dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) # Stereo Depth example dai_add_example(30_2_stereo_depth_video src/30_2_stereo_depth_video.cpp) diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/04_rgb_encoding.cpp new file mode 100644 index 000000000..ef713419d --- /dev/null +++ b/examples/src/04_rgb_encoding.cpp @@ -0,0 +1,68 @@ + +#include +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + + // Keyboard interrupt (Ctrl + C) detected +static bool alive = true; +static void sigintHandler(int signum) { + alive = false; +} + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + signal(SIGINT, &sigintHandler); + + std::string h265Path("video.h265"); + + // If path specified, use that + if(argc > 1) { + h265Path = std::string(argv[1]); + } + + dai::Pipeline p; + + // Define a source - color camera + auto colorCam = p.create<dai::node::ColorCamera>(); + auto xout = p.create<dai::node::XLinkOut>(); + auto videnc = p.create<dai::node::VideoEncoder>(); + + xout->setStreamName("h265"); + colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); + colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + + videnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + // Create outputs + colorCam->video.link(videnc->input); + videnc->bitstream.link(xout->input); + + // Pipeline is defined, now we can connect to the device + dai::Device d(p); + // Start pipeline + d.startPipeline(); + + // Output queue will be used to get the encoded data from the output defined above + auto q = d.getOutputQueue("h265", 30, true); + + // The .h265 file is a raw stream file (not playable yet) + auto videoFile = std::fstream(h265Path, std::ios::out | std::ios::binary); + std::cout << "Press Ctrl+C to stop encoding..."
<< std::endl; + + while(alive) { + auto h264Packet = q->get(); + videoFile.write((char*)h264Packet->getData().data(), h264Packet->getData().size()); + } + videoFile.close(); + + std::cout << "To view the encoded data, convert the stream file " << h265Path << " into a video file (.mp4) using a command below:" << std::endl; + std::cout << "ffmpeg -framerate " << colorCam->getFps() << " -i " << h265Path << " -c copy video.mp4" << std::endl; + + return 0; +} From 266e9b693a39f142c6f3542cd8a98b809a1fdccc Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Fri, 16 Apr 2021 18:56:15 +0300 Subject: [PATCH 04/56] Add rgb mono encoding example to cpp --- examples/CMakeLists.txt | 9 ++- examples/src/04_rgb_encoding.cpp | 13 +--- examples/src/05_rgb_mono_encoding.cpp | 92 +++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 13 deletions(-) create mode 100644 examples/src/05_rgb_mono_encoding.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 28ac22424..6c13d1bd0 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -66,6 +66,12 @@ dai_add_example(02_mono_preview src/02_mono_preview.cpp) # Depth preview output example dai_add_example(03_depth_preview src/03_depth_preview.cpp) +# rgb encoding +dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) + +# rgb+mono encoding +dai_add_example(05_rgb_mono_encoding src/05_rgb_mono_encoding.cpp) + # Color camera video output example dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) @@ -114,9 +120,6 @@ target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shav # MJPEG encoding dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) -# rgb encoding -dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) - # Stereo Depth example dai_add_example(30_2_stereo_depth_video src/30_2_stereo_depth_video.cpp) diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/04_rgb_encoding.cpp index ef713419d..4949890a6 100644 --- a/examples/src/04_rgb_encoding.cpp +++ b/examples/src/04_rgb_encoding.cpp @@ -19,13 +19,6 @@ int main(int argc, char** argv) { using namespace std::chrono; signal(SIGINT, &sigintHandler); - std::string h265Path("video.h265"); - - // If path specified, use that - if(argc > 1) { - h265Path = std::string(argv[1]); - } - dai::Pipeline p; // Define a source - color camera @@ -52,7 +45,7 @@ int main(int argc, char** argv) { auto q = d.getOutputQueue("h265", 30, true); // The .h265 file is a raw stream file (not playable yet) - auto videoFile = std::fstream(h265Path, std::ios::out | std::ios::binary); + auto videoFile = std::fstream("video.h265", std::ios::out | std::ios::binary); std::cout << "Press Ctrl+C to stop encoding..." 
<< std::endl; while(alive) { @@ -61,8 +54,8 @@ int main(int argc, char** argv) { } videoFile.close(); - std::cout << "To view the encoded data, convert the stream file " << h265Path << " into a video file (.mp4) using a command below:" << std::endl; - std::cout << "ffmpeg -framerate " << colorCam->getFps() << " -i " << h265Path << " -c copy video.mp4" << std::endl; + std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; return 0; } diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp new file mode 100644 index 000000000..e76c95cc6 --- /dev/null +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -0,0 +1,92 @@ + +#include +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + + // Keyboard interrupt (Ctrl + C) detected +static bool alive = true; +static void sigintHandler(int signum) { + alive = false; +} + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + signal(SIGINT, &sigintHandler); + + dai::Pipeline p; + + // Define a source - color camera and 2 mono camera + auto colorCam = p.create(); + auto monoCam = p.create(); + auto monoCam2 = p.create(); + + auto ve1 = p.create(); + auto ve2 = p.create(); + auto ve3 = p.create(); + + auto ve1Out = p.create(); + auto ve2Out = p.create(); + auto ve3Out = p.create(); + + colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); + monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); + + ve1Out->setStreamName("ve1Out"); + ve2Out->setStreamName("ve2Out"); + ve3Out->setStreamName("ve3Out"); + + ve1->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); + ve2->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + ve3->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); + + // Create outputs + monoCam->out.link(ve1->input); + colorCam->video.link(ve2->input); + monoCam2->out.link(ve3->input); + + ve1->bitstream.link(ve1Out->input); + ve2->bitstream.link(ve2Out->input); + ve3->bitstream.link(ve3Out->input); + + // Pipeline is defined, now we can connect to the device + dai::Device d(p); + // Start pipeline + d.startPipeline(); + + // Output queues will be used to get the encoded data from the output defined above + auto outQ1 = d.getOutputQueue("ve1Out", 30, true); + auto outQ2 = d.getOutputQueue("ve2Out", 30, true); + auto outQ3 = d.getOutputQueue("ve3Out", 30, true); + + // The .h264 / .h265 files are raw stream files (not playable yet) + auto videoFile1 = std::fstream("mono1.h264", std::ios::out | std::ios::binary); + auto videoFile2 = std::fstream("color.h265", std::ios::out | std::ios::binary); + auto videoFile3 = std::fstream("mono2.h264", std::ios::out | std::ios::binary); + std::cout << "Press Ctrl+C to stop encoding..." 
<< std::endl; + + while(alive) { + auto out1 = outQ1->get(); + videoFile1.write((char*)out1->getData().data(), out1->getData().size()); + auto out2 = outQ2->get(); + videoFile2.write((char*)out2->getData().data(), out2->getData().size()); + auto out3 = outQ3->get(); + videoFile3.write((char*)out3->getData().data(), out3->getData().size()); + } + videoFile1.close(); + videoFile2.close(); + videoFile3.close(); + + std::cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 30 -i mono1.h264 -c copy mono1.mp4" << std::endl; + std::cout << "ffmpeg -framerate 30 -i mono2.h264 -c copy mono2.mp4" << std::endl; + std::cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << std::endl; + + return 0; +} From 9c0f6c0fcbafc37053dc9082fa8750bd1476daf0 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 19 Apr 2021 21:37:29 +0300 Subject: [PATCH 05/56] Add mono camera control and fixed encodings --- examples/src/04_rgb_encoding.cpp | 21 ++- examples/src/05_rgb_mono_encoding.cpp | 22 +--- examples/src/14_2_mono_camera_control.cpp | 152 ++++++++++++++++++++++ 3 files changed, 167 insertions(+), 28 deletions(-) create mode 100644 examples/src/14_2_mono_camera_control.cpp diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/04_rgb_encoding.cpp index 4949890a6..fc566c1ba 100644 --- a/examples/src/04_rgb_encoding.cpp +++ b/examples/src/04_rgb_encoding.cpp @@ -1,23 +1,19 @@ - #include #include #include -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library +// Includes common necessary includes for development using depthai library #include "depthai/depthai.hpp" // Keyboard interrupt (Ctrl + C) detected -static bool alive = true; +static std::atomic alive{true}; static void sigintHandler(int signum) { alive = false; } int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - signal(SIGINT, &sigintHandler); + // Detect signal interrupts (Ctrl + C) + std::signal(SIGINT, &sigintHandler); dai::Pipeline p; @@ -45,17 +41,16 @@ int main(int argc, char** argv) { auto q = d.getOutputQueue("h265", 30, true); // The .h265 file is a raw stream file (not playable yet) - auto videoFile = std::fstream("video.h265", std::ios::out | std::ios::binary); + auto videoFile = std::ofstream("video.h265", std::ios::binary); std::cout << "Press Ctrl+C to stop encoding..." 
<< std::endl; while(alive) { - auto h264Packet = q->get(); - videoFile.write((char*)h264Packet->getData().data(), h264Packet->getData().size()); + auto h265Packet = q->get(); + videoFile.write((char*)(h265Packet->getData().data()), h265Packet->getData().size()); } - videoFile.close(); std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:" << std::endl; std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; return 0; -} +} \ No newline at end of file diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index e76c95cc6..c8a489343 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -1,4 +1,3 @@ - #include #include #include @@ -9,7 +8,7 @@ #include "depthai/depthai.hpp" // Keyboard interrupt (Ctrl + C) detected -static bool alive = true; +static std::atomic alive{true}; static void sigintHandler(int signum) { alive = false; } @@ -17,7 +16,8 @@ static void sigintHandler(int signum) { int main(int argc, char** argv) { using namespace std; using namespace std::chrono; - signal(SIGINT, &sigintHandler); + std::signal(SIGINT, &sigintHandler); + dai::Pipeline p; @@ -25,11 +25,9 @@ int main(int argc, char** argv) { auto colorCam = p.create(); auto monoCam = p.create(); auto monoCam2 = p.create(); - auto ve1 = p.create(); auto ve2 = p.create(); auto ve3 = p.create(); - auto ve1Out = p.create(); auto ve2Out = p.create(); auto ve3Out = p.create(); @@ -37,11 +35,9 @@ int main(int argc, char** argv) { colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); - ve1Out->setStreamName("ve1Out"); ve2Out->setStreamName("ve2Out"); ve3Out->setStreamName("ve3Out"); - ve1->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); ve2->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); ve3->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); @@ -50,7 +46,6 @@ int main(int argc, char** argv) { monoCam->out.link(ve1->input); colorCam->video.link(ve2->input); monoCam2->out.link(ve3->input); - ve1->bitstream.link(ve1Out->input); ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); @@ -66,9 +61,9 @@ int main(int argc, char** argv) { auto outQ3 = d.getOutputQueue("ve3Out", 30, true); // The .h264 / .h265 files are raw stream files (not playable yet) - auto videoFile1 = std::fstream("mono1.h264", std::ios::out | std::ios::binary); - auto videoFile2 = std::fstream("color.h265", std::ios::out | std::ios::binary); - auto videoFile3 = std::fstream("mono2.h264", std::ios::out | std::ios::binary); + auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); + auto videoFile2 = std::ofstream("color.h265", std::ios::binary); + auto videoFile3 = std::ofstream("mono2.h264", std::ios::binary); std::cout << "Press Ctrl+C to stop encoding..." 
<< std::endl; while(alive) { @@ -79,9 +74,6 @@ int main(int argc, char** argv) { auto out3 = outQ3->get(); videoFile3.write((char*)out3->getData().data(), out3->getData().size()); } - videoFile1.close(); - videoFile2.close(); - videoFile3.close(); std::cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << std::endl; std::cout << "ffmpeg -framerate 30 -i mono1.h264 -c copy mono1.mp4" << std::endl; @@ -89,4 +81,4 @@ int main(int argc, char** argv) { std::cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << std::endl; return 0; -} +} \ No newline at end of file diff --git a/examples/src/14_2_mono_camera_control.cpp b/examples/src/14_2_mono_camera_control.cpp new file mode 100644 index 000000000..3d9ad628e --- /dev/null +++ b/examples/src/14_2_mono_camera_control.cpp @@ -0,0 +1,152 @@ +/** + * This example shows usage of mono camera in crop mode with the possibility to move the crop. + * Uses 'WASD' controls to move the crop window, 'T' to trigger autofocus, 'IOKL,.' for manual exposure/focus: + * Control: key[dec/inc] min..max + * exposure time: I O 1..33000 [us] + * sensitivity iso: K L 100..1600 + * To go back to auto controls: + * 'E' - autoexposure + */ +#include +#include + +// Includes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Step size ('W','A','S','D' controls) +static constexpr float stepSize = 0.02; + +// Manual exposure/focus set step +static constexpr int EXP_STEP = 500; // us +static constexpr int ISO_STEP = 50; + +static int clamp(int num, int v0, int v1) { + return std::max(v0, std::min(num, v1)); +} + +static std::atomic sendCamConfig{false}; + +int main() { + // Start defining a pipeline + dai::Pipeline pipeline; + + // Nodes + auto camRight = pipeline.create(); + auto camLeft = pipeline.create(); + auto manipRight = pipeline.create(); + auto manipLeft = pipeline.create(); + auto controlIn = pipeline.create(); + auto configIn = pipeline.create(); + auto manipOutRight = pipeline.create(); + auto manipOutLeft = pipeline.create(); + + // Crop range + dai::Point2f topLeft(0.4f, 0.4f); + dai::Point2f bottomRight(0.6f, 0.6f); + + // Properties + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + controlIn->setStreamName("control"); + configIn->setStreamName("config"); + manipOutRight->setStreamName("right"); + manipOutLeft->setStreamName("left"); + manipRight->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + manipLeft->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + + // Link nodes + camRight->out.link(manipRight->inputImage); + camLeft->out.link(manipLeft->inputImage); + controlIn->out.link(camRight->inputControl); + controlIn->out.link(camLeft->inputControl); + configIn->out.link(manipRight->inputConfig); + configIn->out.link(manipLeft->inputConfig); + manipRight->out.link(manipOutRight->input); + manipLeft->out.link(manipOutLeft->input); + + // Connect to device + dai::Device dev(pipeline); + + // Start pipeline + dev.startPipeline(); + + // Queues + auto qRight = dev.getOutputQueue(manipOutRight->getStreamName(), 4, false); + auto qLeft = dev.getOutputQueue(manipOutLeft->getStreamName(), 4, false); + auto controlQueue = 
dev.getInputQueue(controlIn->getStreamName()); + auto configQueue = dev.getInputQueue(configIn->getStreamName()); + + // Defaults and limits for manual focus/exposure controls + int exp_time = 20000; + int exp_min = 1; + int exp_max = 33000; + + int sens_iso = 800; + int sens_min = 100; + int sens_max = 1600; + + while(true) { + auto inRight = qRight->get(); + auto inLeft = qLeft->get(); + auto frameRight = inRight->getCvFrame(); + auto frameLeft = inLeft->getCvFrame(); + cv::imshow("right", frameRight); + cv::imshow("left", frameLeft); + + // Update screen (10ms pooling rate) + int key = cv::waitKey(10); + if(key == 'q') { + break; + } else if(key == 'e') { + printf("Autoexposure enable\n"); + dai::CameraControl ctrl; + ctrl.setAutoExposureEnable(); + controlQueue->send(ctrl); + } else if(key == 'i' || key == 'o' || key == 'k' || key == 'l') { + if(key == 'i') exp_time -= EXP_STEP; + if(key == 'o') exp_time += EXP_STEP; + if(key == 'k') sens_iso -= ISO_STEP; + if(key == 'l') sens_iso += ISO_STEP; + exp_time = clamp(exp_time, exp_min, exp_max); + sens_iso = clamp(sens_iso, sens_min, sens_max); + printf("Setting manual exposure, time: %d, iso: %d\n", exp_time, sens_iso); + dai::CameraControl ctrl; + ctrl.setManualExposure(exp_time, sens_iso); + controlQueue->send(ctrl); + } else if(key == 'w') { + if (topLeft.y - stepSize >= 0) { + topLeft.y -= stepSize; + bottomRight.y -= stepSize; + sendCamConfig = true; + } + } else if(key == 'a') { + if (topLeft.x - stepSize >= 0) { + topLeft.x -= stepSize; + bottomRight.x -= stepSize; + sendCamConfig = true; + } + } else if(key == 's') { + if (bottomRight.y + stepSize <= 1) { + topLeft.y += stepSize; + bottomRight.y += stepSize; + sendCamConfig = true; + } + } else if(key == 'd') { + if (bottomRight.x + stepSize <= 1) { + topLeft.x += stepSize; + bottomRight.x +=stepSize; + sendCamConfig = true; + } + } + + // Send new config to camera + if (sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + configQueue->send(cfg); + sendCamConfig = false; + } + } +} \ No newline at end of file From 295f4c7b7a01af2e3e61ec5ca2d3defdbcee70af Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 19 Apr 2021 22:30:07 +0300 Subject: [PATCH 06/56] Add depth crop control --- examples/src/05_rgb_mono_encoding.cpp | 2 - examples/src/14_2_mono_camera_control.cpp | 8 +- examples/src/14_3_depth_crop_control.cpp | 111 ++++++++++++++++++++++ 3 files changed, 115 insertions(+), 6 deletions(-) create mode 100644 examples/src/14_3_depth_crop_control.cpp diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index c8a489343..81b13b268 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -2,8 +2,6 @@ #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" diff --git a/examples/src/14_2_mono_camera_control.cpp b/examples/src/14_2_mono_camera_control.cpp index 3d9ad628e..e247f801c 100644 --- a/examples/src/14_2_mono_camera_control.cpp +++ b/examples/src/14_2_mono_camera_control.cpp @@ -41,8 +41,8 @@ int main() { auto manipOutLeft = pipeline.create(); // Crop range - dai::Point2f topLeft(0.4f, 0.4f); - dai::Point2f bottomRight(0.6f, 0.6f); + dai::Point2f topLeft(0.2, 0.2); + dai::Point2f bottomRight(0.8, 0.8); // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); @@ -90,8 +90,8 @@ int main() { while(true) { auto inRight = 
qRight->get(); auto inLeft = qLeft->get(); - auto frameRight = inRight->getCvFrame(); - auto frameLeft = inLeft->getCvFrame(); + cv::Mat frameRight = inRight->getCvFrame(); + cv::Mat frameLeft = inLeft->getCvFrame(); cv::imshow("right", frameRight); cv::imshow("left", frameLeft); diff --git a/examples/src/14_3_depth_crop_control.cpp b/examples/src/14_3_depth_crop_control.cpp new file mode 100644 index 000000000..36fe779c4 --- /dev/null +++ b/examples/src/14_3_depth_crop_control.cpp @@ -0,0 +1,111 @@ +/** + * This example shows usage of depth camera in crop mode with the possibility to move the crop. + * Use 'WASD' in order to do it. + */ +#include +#include + +// Includes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// Step size ('W','A','S','D' controls) +static constexpr float stepSize = 0.02; + +static std::atomic sendCamConfig{false}; + +int main() { + // Start defining a pipeline + dai::Pipeline pipeline; + + // Nodes + auto camRight = pipeline.create(); + auto camLeft = pipeline.create(); + auto manip = pipeline.create(); + auto stereo = pipeline.create(); + auto configIn = pipeline.create(); + auto xout = pipeline.create(); + + // Crop range + dai::Point2f topLeft(0.2, 0.2); + dai::Point2f bottomRight(0.8, 0.8); + + // Properties + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + configIn->setStreamName("config"); + xout->setStreamName("depth"); + manip->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + manip->setMaxOutputFrameSize(camRight->getResolutionHeight() * camRight->getResolutionWidth() * 3); + stereo->setConfidenceThreshold(200); + stereo->setOutputDepth(true); + + // Link nodes + configIn->out.link(manip->inputConfig); + stereo->depth.link(manip->inputImage); + manip->out.link(xout->input); + camRight->out.link(stereo->left); + camLeft->out.link(stereo->right); + + // Connect to device + dai::Device dev(pipeline); + + // Start pipeline + dev.startPipeline(); + + // Queues + auto q = dev.getOutputQueue(xout->getStreamName(), 4, false); + auto configQueue = dev.getInputQueue(configIn->getStreamName()); + + while(true) { + auto inDepth = q->get(); + cv::Mat depthFrame = inDepth ->getFrame(); + // Frame is transformed, the color map will be applied to highlight the depth info + cv::Mat depthFrameColor; + cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); + cv::equalizeHist(depthFrameColor, depthFrameColor); + cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); + + // Frame is ready to be shown + cv::imshow("depth", depthFrameColor); + + // Update screen (10ms pooling rate) + int key = cv::waitKey(10); + if(key == 'q') { + break; + } else if(key == 'w') { + if (topLeft.y - stepSize >= 0) { + topLeft.y -= stepSize; + bottomRight.y -= stepSize; + sendCamConfig = true; + } + } else if(key == 'a') { + if (topLeft.x - stepSize >= 0) { + topLeft.x -= stepSize; + bottomRight.x -= stepSize; + sendCamConfig = true; + } + } else if(key == 's') { + if (bottomRight.y + stepSize <= 1) { + topLeft.y += stepSize; + bottomRight.y += stepSize; + sendCamConfig = true; + } + } else if(key == 'd') { + if (bottomRight.x + stepSize <= 1) { + topLeft.x += stepSize; + bottomRight.x +=stepSize; + sendCamConfig = true; + } + } + + // Send new config 
to camera + if (sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); + configQueue->send(cfg); + sendCamConfig = false; + } + } +} \ No newline at end of file From 507d569660a45e2bf7d5731d54242b1d8b1d5250 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Tue, 20 Apr 2021 19:00:19 +0300 Subject: [PATCH 07/56] Add rgb and mono full resolution saver --- examples/src/06_rgb_full_resolution_saver.cpp | 66 +++++++++++++++++++ .../src/07_mono_full_resolution_saver.cpp | 56 ++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 examples/src/06_rgb_full_resolution_saver.cpp create mode 100644 examples/src/07_mono_full_resolution_saver.cpp diff --git a/examples/src/06_rgb_full_resolution_saver.cpp b/examples/src/06_rgb_full_resolution_saver.cpp new file mode 100644 index 000000000..f437fab91 --- /dev/null +++ b/examples/src/06_rgb_full_resolution_saver.cpp @@ -0,0 +1,66 @@ +#include +#include +#include +#include "utility.hpp" +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main(int argc, char** argv) +{ + using namespace std::chrono; + + dai::Pipeline p; + + // Define a source - color camera + auto colorCam = p.create(); + auto xoutJpeg = p.create(); + auto xoutRgb = p.create(); + auto videoEnc = p.create(); + + colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); + colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + xoutJpeg->setStreamName("jpeg"); + xoutRgb->setStreamName("rgb"); + videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::MJPEG); + + // Create outputs + colorCam->video.link(xoutRgb->input); + colorCam->video.link(videoEnc->input); + videoEnc->bitstream.link(xoutJpeg->input); + + // Pipeline is defined, now we can connect to the device + dai::Device d(p); + // Start pipeline + d.startPipeline(); + + // Queues + auto qRgb = d.getOutputQueue("rgb", 30, false); + auto qJpeg = d.getOutputQueue("jpeg", 30, true); + + mkdir("06_data", 0777); + + while(true) { + auto inRgb = qRgb->tryGet(); + if (inRgb != NULL){ + cv::Mat frame = inRgb->getCvFrame(); + cv::imshow("rgb", frame); + } + + auto encFrames = qJpeg->tryGetAll(); + for(const auto& encFrame : encFrames){ + uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + std::stringstream videoStr; + videoStr << "06_data/" << time << ".jpeg"; + auto videoFile = std::ofstream(videoStr.str(), std::ios::binary); + videoFile.write((char*)encFrame->getData().data(), encFrame->getData().size()); + } + + int key = cv::waitKey(1); + if(key == 'q') + return 0; + } + + return 0; +} diff --git a/examples/src/07_mono_full_resolution_saver.cpp b/examples/src/07_mono_full_resolution_saver.cpp new file mode 100644 index 000000000..1deb90626 --- /dev/null +++ b/examples/src/07_mono_full_resolution_saver.cpp @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main(int argc, char** argv) +{ + using namespace std; + using namespace std::chrono; + + dai::Pipeline p; + + // Define a source - color camera + auto camRight = p.create(); + auto xoutRight = p.create(); + + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + xoutRight->setStreamName("right"); + + // Create outputs + 
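+        // Linking the mono camera output directly to XLinkOut sends the full 720p frames to the host for saving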
camRight->out.link(xoutRight->input); + + + // Pipeline is defined, now we can connect to the device + dai::Device d(p); + // Start pipeline + d.startPipeline(); + + // Queue + auto q = d.getOutputQueue("right", 4, false); + + mkdir("07_data", 0777); + + while(true) { + auto inRight = q->get(); + cv::Mat frameRight = inRight->getCvFrame(); + // Frame is transformed and ready to be shown + cv::imshow("right", frameRight); + + uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + std::stringstream videoStr; + videoStr << "07_data/" << time << ".png"; + // After showing the frame, it's being stored inside a target directory as a PNG image + cv::imwrite(videoStr.str(), frameRight); + + int key = cv::waitKey(1); + if(key == 'q') + return 0; + } + + return 0; +} From f6fca64037a9208dd261f47c52966a9f726772d5 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 22 Apr 2021 20:14:33 +0300 Subject: [PATCH 08/56] Add mono mobilenet cpp example --- examples/src/09_mono_mobilenet.cpp | 117 +++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 examples/src/09_mono_mobilenet.cpp diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/09_mono_mobilenet.cpp new file mode 100644 index 000000000..8616dbcb2 --- /dev/null +++ b/examples/src/09_mono_mobilenet.cpp @@ -0,0 +1,117 @@ +#include +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline p; + + // Define source + auto camRight = p.create(); + auto manip = p.create(); + auto manipOut = p.create(); + auto detectionNetwork = p.create(); + auto nnOut = p.create(); + + // Properties + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + detectionNetwork->setConfidenceThreshold(0.5); + detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setNumInferenceThreads(2); + manip->initialConfig.setResize(300, 300); + manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manipOut->setStreamName("right"); + nnOut->setStreamName("detections"); + + // Create outputs + camRight->out.link(manip->inputImage); + manip->out.link(detectionNetwork->input); + manip->out.link(manipOut->input); + detectionNetwork->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device d(p); + // Start the pipeline + d.startPipeline(); + + auto qRight = d.getOutputQueue("right", 4, false); + auto detections = d.getOutputQueue("detections", 4, false); + + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + while(true) { + auto inRight = qRight->get(); + auto det = detections->get(); + cv::Mat frame = 
inRight->getCvFrame(); + + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + + auto color = cv::Scalar(255, 255, 255); + auto dets = det->detections; + for(const auto& d : dets) { + int x1 = d.xmin * frame.cols; + int y1 = d.ymin * frame.rows; + int x2 = d.xmax * frame.cols; + int y2 = d.ymax * frame.rows; + + int labelIndex = d.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << d.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + + std::stringstream fpsStr; + fpsStr << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRight->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + + cv::imshow("right", frame); + + int key = cv::waitKey(1); + if(key == 'q') + return 0; + } + + return 0; +} From f3feafbb02403141324c07f29f5a181308641fb4 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 22 Apr 2021 23:09:15 +0300 Subject: [PATCH 09/56] Add mono depth mobilenet example --- examples/src/09_mono_mobilenet.cpp | 62 ++++----- examples/src/10_mono_depth_mobilenetssd.cpp | 134 ++++++++++++++++++++ 2 files changed, 157 insertions(+), 39 deletions(-) create mode 100644 examples/src/10_mono_depth_mobilenetssd.cpp diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/09_mono_mobilenet.cpp index 8616dbcb2..97c3b3dcb 100644 --- a/examples/src/09_mono_mobilenet.cpp +++ b/examples/src/09_mono_mobilenet.cpp @@ -1,6 +1,5 @@ #include #include -#include #include "utility.hpp" @@ -14,8 +13,6 @@ static const std::vector labelMap = {"background", "aeroplane", "bi int main(int argc, char** argv) { using namespace std; - using namespace std::chrono; - // Default blob path provided by Hunter private data download // Applicable for easier example usage only std::string nnPath(BLOB_PATH); @@ -63,55 +60,42 @@ int main(int argc, char** argv) { auto qRight = d.getOutputQueue("right", 4, false); auto detections = d.getOutputQueue("detections", 4, false); - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(true) { - auto inRight = qRight->get(); - auto det = detections->get(); - cv::Mat frame = inRight->getCvFrame(); - - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) 
{ + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } + // Show the frame + cv::imshow(name, frame); + }; - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, inRight->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + while(true) { + auto inRight = qRight->get(); + auto inDet = detections->get(); + auto detections = inDet->detections; + cv::Mat frame = inRight->getCvFrame(); - cv::imshow("right", frame); + displayFrame("right", frame, detections); int key = cv::waitKey(1); - if(key == 'q') + if(key == 'q' || key == 'Q') return 0; } - return 0; -} +} \ No newline at end of file diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/10_mono_depth_mobilenetssd.cpp new file mode 100644 index 000000000..b126d2a6f --- /dev/null +++ b/examples/src/10_mono_depth_mobilenetssd.cpp @@ -0,0 +1,134 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +static std::atomic flipRectified{true}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline p; + + // Define source + auto camRight = p.create(); + auto camLeft = p.create(); + auto stereo = p.create(); + auto manip = p.create(); + auto nn = p.create(); + auto nnOut = p.create(); + auto disparityOut = p.create(); + auto xoutRight = p.create(); + + // Properties + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + stereo->setOutputRectified(true); + stereo->setConfidenceThreshold(255); + stereo->setRectifyEdgeFillColor(0); + manip->initialConfig.setResize(300, 300); + manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + disparityOut->setStreamName("disparity"); + 
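+    // Stream names set here must match the names requested with getOutputQueue() further below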
xoutRight->setStreamName("rectifiedRight"); + nnOut->setStreamName("nn"); + + // Create outputs + camRight->out.link(stereo->right); + camLeft->out.link(stereo->left); + stereo->rectifiedRight.link(manip->inputImage); + stereo->disparity.link(disparityOut->input); + manip->out.link(nn->input); + manip->out.link(xoutRight->input); + nn->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device d(p); + // Start the pipeline + d.startPipeline(); + + // Queuess + auto qRight = d.getOutputQueue("rectifiedRight", 4, false); + auto qDisparity = d.getOutputQueue("disparity", 4, false); + auto qDet = d.getOutputQueue("nn", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto show = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + float disparity_multiplier = 255 / 95; + while(true) { + auto inRight = qRight->get(); + auto inDet = qDet->get(); + auto inDisparity = qDisparity->get(); + auto detections = inDet->detections; + cv::Mat rightFrame = inRight->getCvFrame(); + cv::Mat disparityFrame = inDisparity->getCvFrame(); + + if (flipRectified){ + cv::flip(rightFrame, rightFrame, 1); + + for(auto& detection : detections) { + auto swap = detection.xmin; + detection.xmin = 1 - detection.xmax; + detection.xmax = 1 - swap; + } + } + disparityFrame.convertTo(disparityFrame, CV_8UC1, disparity_multiplier); + //Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); + show("disparity", disparityFrame, detections); + show("rectified right", rightFrame, detections); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') + return 0; + } + return 0; +} \ No newline at end of file From f209884cf0b201fb6d7cbee248fd7599b83d19fd Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 22 Apr 2021 23:41:44 +0300 Subject: [PATCH 10/56] Synch --- examples/src/09_mono_mobilenet.cpp | 21 +++++++------- examples/src/10_mono_depth_mobilenetssd.cpp | 31 ++++++++++----------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/09_mono_mobilenet.cpp index 97c3b3dcb..699f93aaa 100644 --- a/examples/src/09_mono_mobilenet.cpp +++ b/examples/src/09_mono_mobilenet.cpp @@ -26,14 +26,14 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p; + dai::Pipeline pipeline; // Define source - auto camRight = p.create(); - auto manip = p.create(); - auto 
manipOut = p.create(); - auto detectionNetwork = p.create(); - auto nnOut = p.create(); + auto camRight = pipeline.create(); + auto manip = pipeline.create(); + auto manipOut = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto nnOut = pipeline.create(); // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); @@ -53,12 +53,13 @@ int main(int argc, char** argv) { detectionNetwork->out.link(nnOut->input); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto qRight = d.getOutputQueue("right", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); + // Queues + auto qRight = device.getOutputQueue("right", 4, false); + auto detections = device.getOutputQueue("detections", 4, false); // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, auto frame, auto detections) { diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/10_mono_depth_mobilenetssd.cpp index b126d2a6f..526c9c556 100644 --- a/examples/src/10_mono_depth_mobilenetssd.cpp +++ b/examples/src/10_mono_depth_mobilenetssd.cpp @@ -28,17 +28,17 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p; + dai::Pipeline pipeline; // Define source - auto camRight = p.create(); - auto camLeft = p.create(); - auto stereo = p.create(); - auto manip = p.create(); - auto nn = p.create(); - auto nnOut = p.create(); - auto disparityOut = p.create(); - auto xoutRight = p.create(); + auto camRight = pipeline.create(); + auto camLeft = pipeline.create(); + auto stereo = pipeline.create(); + auto manip = pipeline.create(); + auto nn = pipeline.create(); + auto nnOut = pipeline.create(); + auto disparityOut = pipeline.create(); + auto xoutRight = pipeline.create(); // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); @@ -53,7 +53,6 @@ int main(int argc, char** argv) { nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); - nn->input.setBlocking(false); disparityOut->setStreamName("disparity"); xoutRight->setStreamName("rectifiedRight"); nnOut->setStreamName("nn"); @@ -68,14 +67,14 @@ int main(int argc, char** argv) { nn->out.link(nnOut->input); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - // Queuess - auto qRight = d.getOutputQueue("rectifiedRight", 4, false); - auto qDisparity = d.getOutputQueue("disparity", 4, false); - auto qDet = d.getOutputQueue("nn", 4, false); + // Queues + auto qRight = device.getOutputQueue("rectifiedRight", 4, false); + auto qDisparity = device.getOutputQueue("disparity", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); // Add bounding boxes and text to the frame and show it to the user auto show = [](std::string name, auto frame, auto detections) { From f1fb1ed3a2ed0d67615a5d3b180e91f222203d50 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Sat, 24 Apr 2021 00:24:20 +0300 Subject: [PATCH 11/56] Add rgb encoding mono mobilenet with depth --- .../src/11_rgb_encoding_mono_mobilenet.cpp | 150 +++++++++++++ .../12_rgb_encoding_mono_mobilenet_depth.cpp | 198 ++++++++++++++++++ 2 files changed, 348 insertions(+) create mode 100644 examples/src/11_rgb_encoding_mono_mobilenet.cpp create mode 100644 
examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp diff --git a/examples/src/11_rgb_encoding_mono_mobilenet.cpp b/examples/src/11_rgb_encoding_mono_mobilenet.cpp new file mode 100644 index 000000000..bc386f8f3 --- /dev/null +++ b/examples/src/11_rgb_encoding_mono_mobilenet.cpp @@ -0,0 +1,150 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source + auto camRgb = pipeline.create(); + auto camRight = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto nn = pipeline.create(); + auto manip = pipeline.create(); + + auto xoutRight = pipeline.create(); + auto manipOut = pipeline.create(); + auto nnOut = pipeline.create(); + auto videoOut = pipeline.create(); + + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + + // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manip->initialConfig.setResize(300, 300); + + videoOut->setStreamName("h265"); + xoutRight->setStreamName("right"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); + + // Create outputs + camRgb->video.link(videoEncoder->input); + videoEncoder->bitstream.link(videoOut->input); + camRight->out.link(manip->inputImage); + manip->out.link(nn->input); + camRight->out.link(xoutRight->input); + manip->out.link(manipOut->input); + nn->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + // Queues + int queueSize = 8; + auto qRight = device.getOutputQueue("right", queueSize); + auto qManip = device.getOutputQueue("manip", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + auto videoFile = std::ofstream("video.h265", std::ios::binary); + cv::namedWindow("right", cv::WINDOW_NORMAL); + cv::namedWindow("manip", cv::WINDOW_NORMAL); + + while(true) { + auto inRight = qRight->get(); + auto inManip = qManip->get(); + auto inDet = qDet->get(); + auto detections = inDet->detections; + cv::Mat frame = inRight->getCvFrame(); + cv::Mat frameManip = inManip->getCvFrame(); + + auto out1 = qRgbEnc->get(); + videoFile.write((char*)out1->getData().data(), out1->getData().size()); + + int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + for(auto& detection : detections) { + int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * camRight->getResolutionHeight(); + int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * camRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow("right", frame); + + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow("manip", frameManip); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q'){ + std::cout << "To view the 
encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; + return 0; + } + } + return 0; +} \ No newline at end of file diff --git a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp new file mode 100644 index 000000000..1f61dd0f3 --- /dev/null +++ b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp @@ -0,0 +1,198 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +static std::atomic flipRectified{true}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source + auto camRgb = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto camLeft = pipeline.create(); + auto camRight = pipeline.create(); + auto depth = pipeline.create(); + auto nn = pipeline.create(); + auto manip = pipeline.create(); + + auto videoOut = pipeline.create(); + auto xoutRight = pipeline.create(); + auto disparityOut = pipeline.create(); + auto manipOut = pipeline.create(); + auto nnOut = pipeline.create(); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + + depth->setConfidenceThreshold(255); + depth->setOutputRectified(true); + depth->setRectifyMirrorFrame(false); + depth->setRectifyEdgeFillColor(0); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + + // The NN model expects BGR input-> By default ImageManip output type would be same as input (gray in this case) + manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manip->initialConfig.setResize(300, 300); + + videoOut->setStreamName("h265"); + disparityOut->setStreamName("disparity"); + xoutRight->setStreamName("right"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); + + // Create outputs + camRgb->video.link(videoEncoder->input); + videoEncoder->bitstream.link(videoOut->input); + camRight->out.link(xoutRight->input); + camRight->out.link(depth->right); + camLeft->out.link(depth->left); + depth->disparity.link(disparityOut->input); + depth->rectifiedRight.link(manip->inputImage); + manip->out.link(nn->input); + manip->out.link(manipOut->input); + nn->out.link(nnOut->input); + + // Connect to device with 
above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + // Queues + int queueSize = 8; + auto qRight = device.getOutputQueue("right", queueSize); + auto qDisparity = device.getOutputQueue("disparity", queueSize); + auto qManip = device.getOutputQueue("manip", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + auto videoFile = std::ofstream("video.h265", std::ios::binary); + cv::namedWindow("right", cv::WINDOW_NORMAL); + cv::namedWindow("manip", cv::WINDOW_NORMAL); + + // Disparity range is 0..95, used for normalization + float disparity_multiplier = 255 / 95; + + while(true) { + auto inRight = qRight->get(); + auto inManip = qManip->get(); + auto inDet = qDet->get(); + auto inDisparity = qDisparity->get(); + auto detections = inDet->detections; + + cv::Mat frame = inRight->getCvFrame(); + cv::Mat frameManip = inManip->getCvFrame(); + cv::Mat frameDisparity = inDisparity->getCvFrame(); + + auto out1 = qRgbEnc->get(); + videoFile.write((char*)out1->getData().data(), out1->getData().size()); + + if (flipRectified){ + cv::flip(frameDisparity, frameDisparity, 1); + } + frameDisparity.convertTo(frameDisparity, CV_8UC1, disparity_multiplier); + cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); + + int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + for(auto& detection : detections) { + int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * camRight->getResolutionHeight(); + int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * camRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the right cam frame + cv::imshow("right", frame); + + for(auto& detection : detections) { + int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * camRight->getResolutionHeight(); + int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * camRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameDisparity, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameDisparity, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameDisparity, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the disparity frame + cv::imshow("disparity", frameDisparity); + + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = 
detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the manip frame + cv::imshow("manip", frameManip); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q'){ + std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; + return 0; + } + } + return 0; +} \ No newline at end of file From 0f65db177a67eec4d551952ae61a8ffe663a9c69 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Sat, 24 Apr 2021 00:37:16 +0300 Subject: [PATCH 12/56] Add encoding max limit example --- examples/src/05_rgb_mono_encoding.cpp | 3 +- examples/src/13_encoding_max_limit.cpp | 84 ++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 examples/src/13_encoding_max_limit.cpp diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index 81b13b268..e099bcd31 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -16,7 +16,6 @@ int main(int argc, char** argv) { using namespace std::chrono; std::signal(SIGINT, &sigintHandler); - dai::Pipeline p; // Define a source - color camera and 2 mono camera @@ -26,6 +25,7 @@ int main(int argc, char** argv) { auto ve1 = p.create(); auto ve2 = p.create(); auto ve3 = p.create(); + auto ve1Out = p.create(); auto ve2Out = p.create(); auto ve3Out = p.create(); @@ -44,6 +44,7 @@ int main(int argc, char** argv) { monoCam->out.link(ve1->input); colorCam->video.link(ve2->input); monoCam2->out.link(ve3->input); + ve1->bitstream.link(ve1Out->input); ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); diff --git a/examples/src/13_encoding_max_limit.cpp b/examples/src/13_encoding_max_limit.cpp new file mode 100644 index 000000000..308136ec8 --- /dev/null +++ b/examples/src/13_encoding_max_limit.cpp @@ -0,0 +1,84 @@ +#include +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + + // Keyboard interrupt (Ctrl + C) detected +static std::atomic alive{true}; +static void sigintHandler(int signum) { + alive = false; +} + +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::signal(SIGINT, &sigintHandler); + + dai::Pipeline p; + + // Define a source - color camera and 2 mono camera + auto colorCam = p.create(); + auto monoCam = p.create(); + auto monoCam2 = p.create(); + auto ve1 = p.create(); + auto ve2 = p.create(); + auto ve3 = p.create(); + + auto ve1Out = p.create(); + auto ve2Out = p.create(); + auto ve3Out = p.create(); + + colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); + colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); + 
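+    // The mono cameras stay at their default (720p) resolution; together with the 4K color stream this targets the encoder's maximum throughput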
monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); + ve1Out->setStreamName("ve1Out"); + ve2Out->setStreamName("ve2Out"); + ve3Out->setStreamName("ve3Out"); + ve1->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); + ve2->setDefaultProfilePreset(3840, 2160, 25, dai::VideoEncoderProperties::Profile::H265_MAIN); + ve3->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); + + // Create outputs + monoCam->out.link(ve1->input); + colorCam->video.link(ve2->input); + monoCam2->out.link(ve3->input); + + ve1->bitstream.link(ve1Out->input); + ve2->bitstream.link(ve2Out->input); + ve3->bitstream.link(ve3Out->input); + + // Pipeline is defined, now we can connect to the device + dai::Device d(p); + // Start pipeline + d.startPipeline(); + + // Output queues will be used to get the encoded data from the output defined above + auto outQ1 = d.getOutputQueue("ve1Out", 30, true); + auto outQ2 = d.getOutputQueue("ve2Out", 30, true); + auto outQ3 = d.getOutputQueue("ve3Out", 30, true); + + // The .h264 / .h265 files are raw stream files (not playable yet) + auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); + auto videoFile2 = std::ofstream("color.h265", std::ios::binary); + auto videoFile3 = std::ofstream("mono2.h264", std::ios::binary); + std::cout << "Press Ctrl+C to stop encoding..." << std::endl; + + while(alive) { + auto out1 = outQ1->get(); + videoFile1.write((char*)out1->getData().data(), out1->getData().size()); + auto out2 = outQ2->get(); + videoFile2.write((char*)out2->getData().data(), out2->getData().size()); + auto out3 = outQ3->get(); + videoFile3.write((char*)out3->getData().data(), out3->getData().size()); + } + + std::cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 25 -i mono1.h264 -c copy mono1.mp4" << std::endl; + std::cout << "ffmpeg -framerate 25 -i mono2.h264 -c copy mono2.mp4" << std::endl; + std::cout << "ffmpeg -framerate 25 -i color.h265 -c copy color.mp4" << std::endl; + + return 0; +} \ No newline at end of file From f9c7daf9d848214f060255ad07023f01a6d4ee90 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 26 Apr 2021 17:20:35 +0300 Subject: [PATCH 13/56] Add rgb mobilenet 4k example --- examples/src/05_rgb_mono_encoding.cpp | 1 + examples/src/13_encoding_max_limit.cpp | 1 + examples/src/15_rgb_mobilenet_4k.cpp | 114 +++++++++++++++++++++++++ 3 files changed, 116 insertions(+) create mode 100644 examples/src/15_rgb_mobilenet_4k.cpp diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index e099bcd31..07284cc15 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -30,6 +30,7 @@ int main(int argc, char** argv) { auto ve2Out = p.create(); auto ve3Out = p.create(); + // Properties colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); diff --git a/examples/src/13_encoding_max_limit.cpp b/examples/src/13_encoding_max_limit.cpp index 308136ec8..a1fa71232 100644 --- a/examples/src/13_encoding_max_limit.cpp +++ b/examples/src/13_encoding_max_limit.cpp @@ -30,6 +30,7 @@ int main(int argc, char** argv) { auto ve2Out = p.create(); auto ve3Out = p.create(); + // Properties colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); 
colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); diff --git a/examples/src/15_rgb_mobilenet_4k.cpp b/examples/src/15_rgb_mobilenet_4k.cpp new file mode 100644 index 000000000..43a122453 --- /dev/null +++ b/examples/src/15_rgb_mobilenet_4k.cpp @@ -0,0 +1,114 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source + auto camRgb = pipeline.create(); + auto nn = pipeline.create(); + auto nnOut = pipeline.create(); + auto xoutVideo = pipeline.create(); + auto xoutPreview = pipeline.create(); + + // Properties + camRgb->setPreviewSize(300, 300); // NN input + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + camRgb->setInterleaved(false); + camRgb->setPreviewKeepAspectRatio(false); + // Define a neural network that will make predictions based on the source frames + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + xoutVideo->setStreamName("video"); + xoutPreview->setStreamName("preview"); + nnOut->setStreamName("nn"); + + // Create outputs + camRgb->video.link(xoutVideo->input); + camRgb->preview.link(xoutPreview->input); + camRgb->preview.link(nn->input); + nn->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + // Queues + auto qVideo = device.getOutputQueue("video", 4, false); + auto qPreview = device.getOutputQueue("preview", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // 
Show the frame + cv::imshow(name, frame); + }; + + cv::namedWindow("video", cv::WINDOW_NORMAL); + cv::resizeWindow("video", 1280, 720); + std::cout << "Resize video window with mouse drag!" << std::endl; + + while(true) { + auto inVideo = qVideo->get(); + auto inPreview = qPreview->get(); + auto inDet = qDet->get(); + auto detections = inDet->detections; + cv::Mat videoFrame = inVideo->getCvFrame(); + cv::Mat previewFrame = inPreview->getCvFrame(); + + displayFrame("video", videoFrame, detections); + displayFrame("preview", previewFrame, detections); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') + return 0; + } + return 0; +} \ No newline at end of file From e8e5631c0d2077b486eb0f812638d0cac964d447 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 26 Apr 2021 18:04:22 +0300 Subject: [PATCH 14/56] Upgraded rgb mobilenet example --- examples/src/08_rgb_mobilenet.cpp | 187 +++++++++++++++--------------- 1 file changed, 91 insertions(+), 96 deletions(-) diff --git a/examples/src/08_rgb_mobilenet.cpp b/examples/src/08_rgb_mobilenet.cpp index 60b99894a..13aeb2035 100644 --- a/examples/src/08_rgb_mobilenet.cpp +++ b/examples/src/08_rgb_mobilenet.cpp @@ -1,4 +1,4 @@ - +#include #include #include @@ -7,42 +7,16 @@ // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -static bool syncNN = true; - -dai::Pipeline createNNPipeline(std::string nnPath) { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto nn1 = p.create(); - auto nnOut = p.create(); - - nn1->setBlobPath(nnPath); - - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(nn1->input); - if(syncNN) { - nn1->passthrough.link(xlinkOut->input); - } else { - colorCam->preview.link(xlinkOut->input); - } - - nn1->out.link(nnOut->input); - - return p; -} +static std::atomic syncNN{true}; int main(int argc, char** argv) { using namespace std; - + using namespace std::chrono; // Default blob path provided by Hunter private data download // Applicable for easier example usage only std::string nnPath(BLOB_PATH); @@ -56,78 +30,99 @@ int main(int argc, char** argv) { printf("Using blob at path: %s\n", nnPath.c_str()); // Create pipeline - dai::Pipeline p = createNNPipeline(nnPath); + dai::Pipeline pipeline; + + // Define source + auto camRgb = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nn = pipeline.create(); + auto nnOut = pipeline.create(); + + // Properties + camRgb->setPreviewSize(300, 300); // NN input + camRgb->setInterleaved(false); + camRgb->setFps(40); + // Define a neural network that will make predictions based on the source frames + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); + + // Create outputs + if (syncNN) { + nn->passthrough.link(xoutRgb->input); + } else { + 
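+        // Without syncing, preview frames go straight to the host and may not be aligned with the detections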
camRgb->preview.link(xoutRgb->input); + } + + camRgb->preview.link(nn->input); + nn->out.link(nnOut->input); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); - - cv::Mat frame; - auto preview = d.getOutputQueue("preview"); - auto detections = d.getOutputQueue("detections"); - - while(1) { - auto imgFrame = preview->get(); - if(imgFrame) { - printf("Frame - w: %d, h: %d\n", imgFrame->getWidth(), imgFrame->getHeight()); - frame = toMat(imgFrame->getData(), imgFrame->getWidth(), imgFrame->getHeight(), 3, 1); - } - - struct Detection { - unsigned int label; - float score; - float x_min; - float y_min; - float x_max; - float y_max; - }; - - vector dets; - - auto det = detections->get(); - std::vector detData = det->getFirstLayerFp16(); - if(detData.size() > 0) { - int i = 0; - while(detData[i * 7] != -1.0f) { - Detection d; - d.label = detData[i * 7 + 1]; - d.score = detData[i * 7 + 2]; - d.x_min = detData[i * 7 + 3]; - d.y_min = detData[i * 7 + 4]; - d.x_max = detData[i * 7 + 5]; - d.y_max = detData[i * 7 + 6]; - i++; - dets.push_back(d); + device.startPipeline(); + + // Queues + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - - for(const auto& d : dets) { - int x1 = d.x_min * frame.cols; - int y1 = d.y_min * frame.rows; - int x2 = d.x_max * frame.cols; - int y2 = d.y_max * frame.rows; - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), cv::Scalar(255, 255, 255)); + // Show the frame + cv::imshow(name, frame); + }; + + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + + while(true) { + auto inRgb = qRgb->get(); + auto inDet = qDet->get(); + auto detections = inDet->detections; + cv::Mat frame = inRgb->getCvFrame(); + + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; } - printf("===================== %lu detection(s) =======================\n", dets.size()); - for(unsigned det = 0; det < dets.size(); ++det) { - printf("%5d | %6.4f | %7.4f | %7.4f | %7.4f | %7.4f\n", - dets[det].label, - dets[det].score, - dets[det].x_min, - dets[det].y_min, - dets[det].x_max, - dets[det].y_max); - } + std::stringstream fpsStr; + fpsStr << "NN fps: " <getHeight() - 4), 
cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + + displayFrame("video", frame, detections); - cv::imshow("preview", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') return 0; - } } - return 0; -} +} \ No newline at end of file From 59dd7fcb31a6914e0b7d6e86ffb704708f28dd94 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 26 Apr 2021 18:28:03 +0300 Subject: [PATCH 15/56] Add rgb encoding mobilenet --- examples/src/18_rgb_encoding_mobilenet.cpp | 121 +++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 examples/src/18_rgb_encoding_mobilenet.cpp diff --git a/examples/src/18_rgb_encoding_mobilenet.cpp b/examples/src/18_rgb_encoding_mobilenet.cpp new file mode 100644 index 000000000..c5af21218 --- /dev/null +++ b/examples/src/18_rgb_encoding_mobilenet.cpp @@ -0,0 +1,121 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source + auto camRgb = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto videoEncoder = pipeline.create(); + auto videoOut = pipeline.create(); + auto nn = pipeline.create(); + auto nnOut = pipeline.create(); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setPreviewSize(300, 300); + camRgb->setInterleaved(false); + + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + videoOut->setStreamName("h265"); + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); + + // Create outputs + camRgb->video.link(videoEncoder->input); + camRgb->preview.link(xoutRgb->input); + camRgb->preview.link(nn->input); + videoEncoder->bitstream.link(videoOut->input); + nn->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + // Queues + int queueSize = 8; + auto qRgb = device.getOutputQueue("rgb", queueSize); + auto qDet = device.getOutputQueue("nn", queueSize); + auto qRgbEnc = device.getOutputQueue("h265", 30, true); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = 
detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + auto videoFile = std::ofstream("video.h264", std::ios::binary); + + while(true) { + auto inRgb = qRgb->get(); + auto inDet = qDet->get(); + auto out = qRgbEnc->get(); + auto detections = inDet->detections; + cv::Mat frame = inRgb->getCvFrame(); + + videoFile.write((char*)out->getData().data(), out->getData().size()); + + displayFrame("rgb", frame, detections); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q'){ + std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; + std::cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << std::endl; + return 0; + } + } + return 0; +} \ No newline at end of file From 2ea179b3e57f3096983cedc7bd1eb70831843f7a Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Wed, 28 Apr 2021 01:23:56 +0300 Subject: [PATCH 16/56] Add video mobilenet and fix some others --- examples/src/01_rgb_preview.cpp | 68 +++++----- examples/src/02_mono_preview.cpp | 66 +++++----- examples/src/03_depth_preview.cpp | 88 ++++++++----- examples/src/04_rgb_encoding.cpp | 27 ++-- examples/src/05_rgb_mono_encoding.cpp | 51 ++++---- examples/src/06_rgb_full_resolution_saver.cpp | 39 +++--- .../src/07_mono_full_resolution_saver.cpp | 28 ++-- examples/src/08_rgb_mobilenet.cpp | 14 +- examples/src/17_video_mobilenet.cpp | 120 ++++++++++++++++++ 9 files changed, 315 insertions(+), 186 deletions(-) create mode 100644 examples/src/17_video_mobilenet.cpp diff --git a/examples/src/01_rgb_preview.cpp b/examples/src/01_rgb_preview.cpp index 8e0509fa0..43c3bd4a4 100644 --- a/examples/src/01_rgb_preview.cpp +++ b/examples/src/01_rgb_preview.cpp @@ -1,52 +1,48 @@ - #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -dai::Pipeline createPipeline() { - dai::Pipeline p; +int main() { + using namespace std; + + dai::Pipeline pipeline; + + // Define source and output + auto camRgb = pipeline.create(); + auto xoutRgb = pipeline.create(); - auto camRgb = p.create(); + xoutRgb->setStreamName("rgb"); + + // Properties camRgb->setPreviewSize(300, 300); + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - camRgb->setInterleaved(true); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("preview"); - // Link plugins CAM -> XLINK - camRgb->preview.link(xlinkOut->input); + // Linking + camRgb->preview.link(xoutRgb->input); - return p; -} + // Pipeline is defined, now we can connect to the device + dai::Device device(pipeline); + // Start pipeline + device.startPipeline(); -int main() { - using namespace std; + // Output queue will be used to get the rgb frames from the output 
defined above + auto qRgb = device.getOutputQueue("rgb", 4, false); - dai::Pipeline p = createPipeline(); - dai::Device d(p); - d.startPipeline(); - - cv::Mat frame; - auto preview = d.getOutputQueue("preview"); - - while(1) { - auto imgFrame = preview->get(); - if(imgFrame) { - printf("Frame - w: %d, h: %d\n", imgFrame->getWidth(), imgFrame->getHeight()); - frame = cv::Mat(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC3, imgFrame->getData().data()); - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; + while(true) { + auto inRgb = qRgb->get(); + + // Retrieve 'bgr' (opencv format) frame + cv::imshow("bgr", inRgb->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; } } - return 0; -} +} \ No newline at end of file diff --git a/examples/src/02_mono_preview.cpp b/examples/src/02_mono_preview.cpp index a209bc94f..c213a543c 100644 --- a/examples/src/02_mono_preview.cpp +++ b/examples/src/02_mono_preview.cpp @@ -1,59 +1,51 @@ - #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -dai::Pipeline createPipeline() { - dai::Pipeline p; - - auto camLeft = p.create(); - auto camRight = p.create(); - auto xoutLeft = p.create(); - auto xoutRight = p.create(); +int main() { + using namespace std; + // Start defining a pipeline + dai::Pipeline pipeline; - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + // Define sources and outputs + auto camLeft = pipeline.create(); + auto camRight = pipeline.create(); + auto xoutLeft = pipeline.create(); + auto xoutRight = pipeline.create(); xoutLeft->setStreamName("left"); xoutRight->setStreamName("right"); - // Link plugins CAM -> XLINK - camLeft->out.link(xoutLeft->input); - camRight->out.link(xoutRight->input); + // Properties + camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - return p; -} + // Linking + camRight->out.link(xoutRight->input); + camLeft->out.link(xoutLeft->input); -int main() { - using namespace std; + dai::Device device(pipeline); + device.startPipeline(); - dai::Pipeline p = createPipeline(); - dai::Device d(p); - d.startPipeline(); + // Output queues will be used to get the grayscale frames from the outputs defined above + auto qLeft = device.getOutputQueue("left", 4, false); + auto qRight = device.getOutputQueue("right", 4, false); - cv::Mat frame; - auto leftQueue = d.getOutputQueue("left"); - auto rightQueue = d.getOutputQueue("right"); + while(true) { + auto inLeft = qLeft->get(); + auto inRight = qRight->get(); - while(1) { - auto left = leftQueue->get(); - cv::Mat LeftFrame = left->getFrame(); - cv::imshow("left", LeftFrame); - auto right = rightQueue->get(); - cv::Mat RightFrame = right->getFrame(); - cv::imshow("right", RightFrame); + cv::imshow("left", inLeft->getCvFrame()); + cv::imshow("right", inRight->getCvFrame()); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} +} \ No newline at end 
of file diff --git a/examples/src/03_depth_preview.cpp b/examples/src/03_depth_preview.cpp index 588a4c161..f22892c9a 100644 --- a/examples/src/03_depth_preview.cpp +++ b/examples/src/03_depth_preview.cpp @@ -1,61 +1,81 @@ - #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -dai::Pipeline createPipeline() { - dai::Pipeline p; +// Closer-in minimum depth, disparity range is doubled (from 95 to 190): +static std::atomic extended_disparity{false}; +// Better accuracy for longer distance, fractional disparity 32-levels: +static std::atomic subpixel{false}; +// Better handling for occlusions: +static std::atomic lr_check{false}; + +int main() { + using namespace std; - auto camLeft = p.create(); - auto camRight = p.create(); - auto xout = p.create(); - auto depth = p.create(); + dai::Pipeline pipeline; - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + auto camLeft = pipeline.create(); + auto camRight = pipeline.create(); + auto depth = pipeline.create(); + auto xout = pipeline.create(); + + xout->setStreamName("disparity"); + + camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + // Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth->setConfidenceThreshold(200); - //depth->setMedianFilter(median); + depth->setOutputDepth(false); + // Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) + depth->setMedianFilter(dai::StereoDepthProperties::MedianFilter::KERNEL_7x7); + depth->setLeftRightCheck(lr_check); + + // Normal disparity values range from 0..95, will be used for normalization + int max_disparity = 95; - // Link plugins CAM -> XLINK + if (extended_disparity) max_disparity *= 2; // Double the range + depth->setExtendedDisparity(extended_disparity); + + if (subpixel) max_disparity *= 32; // 5 fractional bits, x32 + depth->setSubpixel(subpixel); + + // When we get disparity to the host, we will multiply all values with the multiplier + // for better visualization + float multiplier = 255 / max_disparity; + + // Linking camLeft->out.link(depth->left); camRight->out.link(depth->right); - - xout->setStreamName("disparity"); depth->disparity.link(xout->input); - return p; -} + // Pipeline is defined, now we can connect to the device + dai::Device device(pipeline); + // Start pipeline + device.startPipeline(); -int main() { - using namespace std; + // Output queue will be used to get the disparity frames from the outputs defined above + auto q = device.getOutputQueue("disparity", 4, false); - dai::Pipeline p = createPipeline(); - dai::Device d(p); - d.startPipeline(); + while(true) { + auto inDepth = q->get(); + auto frame = inDepth->getFrame(); + frame.convertTo(frame, CV_8UC1, multiplier); - auto dispQueue = d.getOutputQueue("disparity", 4, false); + cv::imshow("disparity", frame); - while(1) { - auto disparity = dispQueue->get(); - cv::Mat disp(disparity->getHeight(), disparity->getWidth(), CV_8UC1, disparity->getData().data()); - disp.convertTo(disp, CV_8UC1, 3.0); - cv::imshow("disparity", disp); - cv::Mat disp_color; - cv::applyColorMap(disp, disp_color, cv::COLORMAP_JET); - 
cv::imshow("disparity_color", disp_color); + // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + cv::applyColorMap(frame, frame, cv::COLORMAP_JET); + cv::imshow("disparity_color", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} +} \ No newline at end of file diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/04_rgb_encoding.cpp index fc566c1ba..54a81d0f0 100644 --- a/examples/src/04_rgb_encoding.cpp +++ b/examples/src/04_rgb_encoding.cpp @@ -15,30 +15,31 @@ int main(int argc, char** argv) { // Detect signal interrupts (Ctrl + C) std::signal(SIGINT, &sigintHandler); - dai::Pipeline p; + dai::Pipeline pipeline; - // Define a source - color camera - auto colorCam = p.create(); - auto xout = p.create(); - auto videnc = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto videoEnc = pipeline.create(); + auto xout = pipeline.create(); xout->setStreamName("h265"); - colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); - videnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); + videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); // Create outputs - colorCam->video.link(videnc->input); - videnc->bitstream.link(xout->input); + camRgb->video.link(videoEnc->input); + videoEnc->bitstream.link(xout->input); // Pipeline is defined, now we can connect to the device - dai::Device d(p); + dai::Device device(pipeline); // Start pipeline - d.startPipeline(); + device.startPipeline(); // Output queue will be used to get the encoded data from the output defined above - auto q = d.getOutputQueue("h265", 30, true); + auto q = device.getOutputQueue("h265", 30, true); // The .h265 file is a raw stream file (not playable yet) auto videoFile = std::ofstream("video.h265", std::ios::binary); diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index 07284cc15..31ef08dfb 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -16,49 +16,50 @@ int main(int argc, char** argv) { using namespace std::chrono; std::signal(SIGINT, &sigintHandler); - dai::Pipeline p; + dai::Pipeline pipeline; - // Define a source - color camera and 2 mono camera - auto colorCam = p.create(); - auto monoCam = p.create(); - auto monoCam2 = p.create(); - auto ve1 = p.create(); - auto ve2 = p.create(); - auto ve3 = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto camLeft = pipeline.create(); + auto camRight = pipeline.create(); + auto ve1 = pipeline.create(); + auto ve2 = pipeline.create(); + auto ve3 = pipeline.create(); - auto ve1Out = p.create(); - auto ve2Out = p.create(); - auto ve3Out = p.create(); + auto ve1Out = pipeline.create(); + auto ve2Out = pipeline.create(); + auto ve3Out = pipeline.create(); - // Properties - colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); - monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); - monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); ve1Out->setStreamName("ve1Out"); ve2Out->setStreamName("ve2Out"); ve3Out->setStreamName("ve3Out"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + 
camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + // Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding ve1->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); ve2->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); ve3->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); - // Create outputs - monoCam->out.link(ve1->input); - colorCam->video.link(ve2->input); - monoCam2->out.link(ve3->input); - + // Linking + camLeft->out.link(ve1->input); + camRgb->video.link(ve2->input); + camRight->out.link(ve3->input); ve1->bitstream.link(ve1Out->input); ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); // Pipeline is defined, now we can connect to the device - dai::Device d(p); + dai::Device device(pipeline); // Start pipeline - d.startPipeline(); + device.startPipeline(); // Output queues will be used to get the encoded data from the output defined above - auto outQ1 = d.getOutputQueue("ve1Out", 30, true); - auto outQ2 = d.getOutputQueue("ve2Out", 30, true); - auto outQ3 = d.getOutputQueue("ve3Out", 30, true); + auto outQ1 = device.getOutputQueue("ve1Out", 30, true); + auto outQ2 = device.getOutputQueue("ve2Out", 30, true); + auto outQ3 = device.getOutputQueue("ve3Out", 30, true); // The .h264 / .h265 files are raw stream files (not playable yet) auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); diff --git a/examples/src/06_rgb_full_resolution_saver.cpp b/examples/src/06_rgb_full_resolution_saver.cpp index f437fab91..1b782e544 100644 --- a/examples/src/06_rgb_full_resolution_saver.cpp +++ b/examples/src/06_rgb_full_resolution_saver.cpp @@ -1,7 +1,6 @@ #include #include #include -#include "utility.hpp" #include // Inludes common necessary includes for development using depthai library @@ -11,41 +10,42 @@ int main(int argc, char** argv) { using namespace std::chrono; - dai::Pipeline p; + dai::Pipeline pipeline; - // Define a source - color camera - auto colorCam = p.create(); - auto xoutJpeg = p.create(); - auto xoutRgb = p.create(); - auto videoEnc = p.create(); + // Define source and outputs + auto camRgb = pipeline.create(); + auto xoutJpeg = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto videoEnc = pipeline.create(); - colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); xoutJpeg->setStreamName("jpeg"); xoutRgb->setStreamName("rgb"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::MJPEG); - // Create outputs - colorCam->video.link(xoutRgb->input); - colorCam->video.link(videoEnc->input); + // Linking + camRgb->video.link(xoutRgb->input); + camRgb->video.link(videoEnc->input); videoEnc->bitstream.link(xoutJpeg->input); // Pipeline is defined, now we can connect to the device - dai::Device d(p); + dai::Device device(pipeline); // Start pipeline - d.startPipeline(); + device.startPipeline(); // Queues - auto qRgb = d.getOutputQueue("rgb", 30, false); - auto qJpeg = d.getOutputQueue("jpeg", 30, true); + auto qRgb = device.getOutputQueue("rgb", 30, false); + auto qJpeg = device.getOutputQueue("jpeg", 30, true); mkdir("06_data", 0777); while(true) { 
auto inRgb = qRgb->tryGet(); if (inRgb != NULL){ - cv::Mat frame = inRgb->getCvFrame(); - cv::imshow("rgb", frame); + cv::imshow("rgb", inRgb->getCvFrame()); } auto encFrames = qJpeg->tryGetAll(); @@ -58,9 +58,8 @@ int main(int argc, char** argv) } int key = cv::waitKey(1); - if(key == 'q') + if(key == 'q' || key == 'Q') return 0; } - return 0; } diff --git a/examples/src/07_mono_full_resolution_saver.cpp b/examples/src/07_mono_full_resolution_saver.cpp index 1deb90626..9645470e4 100644 --- a/examples/src/07_mono_full_resolution_saver.cpp +++ b/examples/src/07_mono_full_resolution_saver.cpp @@ -11,44 +11,44 @@ int main(int argc, char** argv) using namespace std; using namespace std::chrono; - dai::Pipeline p; + dai::Pipeline pipeline; - // Define a source - color camera - auto camRight = p.create(); - auto xoutRight = p.create(); + // Define sources and outputs + auto camRight = pipeline.create(); + auto xoutRight = pipeline.create(); + xoutRight->setStreamName("right"); + + // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - xoutRight->setStreamName("right"); - // Create outputs + // Linking camRight->out.link(xoutRight->input); - // Pipeline is defined, now we can connect to the device - dai::Device d(p); + dai::Device device(pipeline); // Start pipeline - d.startPipeline(); + device.startPipeline(); // Queue - auto q = d.getOutputQueue("right", 4, false); + auto q = device.getOutputQueue("right", 4, false); mkdir("07_data", 0777); while(true) { auto inRight = q->get(); - cv::Mat frameRight = inRight->getCvFrame(); // Frame is transformed and ready to be shown - cv::imshow("right", frameRight); + cv::imshow("right", inRight->getCvFrame()); uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); std::stringstream videoStr; videoStr << "07_data/" << time << ".png"; // After showing the frame, it's being stored inside a target directory as a PNG image - cv::imwrite(videoStr.str(), frameRight); + cv::imwrite(videoStr.str(), inRight->getCvFrame()); int key = cv::waitKey(1); - if(key == 'q') + if(key == 'q' || key == 'Q') return 0; } diff --git a/examples/src/08_rgb_mobilenet.cpp b/examples/src/08_rgb_mobilenet.cpp index 13aeb2035..07bade339 100644 --- a/examples/src/08_rgb_mobilenet.cpp +++ b/examples/src/08_rgb_mobilenet.cpp @@ -32,12 +32,15 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define source and outputs auto camRgb = pipeline.create(); - auto xoutRgb = pipeline.create(); auto nn = pipeline.create(); + auto xoutRgb = pipeline.create(); auto nnOut = pipeline.create(); + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); + // Properties camRgb->setPreviewSize(300, 300); // NN input camRgb->setInterleaved(false); @@ -48,10 +51,7 @@ int main(int argc, char** argv) { nn->setNumInferenceThreads(2); nn->input.setBlocking(false); - xoutRgb->setStreamName("rgb"); - nnOut->setStreamName("nn"); - - // Create outputs + // Linking if (syncNN) { nn->passthrough.link(xoutRgb->input); } else { @@ -66,7 +66,7 @@ int main(int argc, char** argv) { // Start the pipeline device.startPipeline(); - // Queues + // Output queues will be used to get the rgb frames and nn data from the outputs defined above auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); diff --git a/examples/src/17_video_mobilenet.cpp b/examples/src/17_video_mobilenet.cpp 
new file mode 100644 index 000000000..2e38b2891 --- /dev/null +++ b/examples/src/17_video_mobilenet.cpp @@ -0,0 +1,120 @@ +#include +#include + +#include "utility.hpp" + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +// MobilenetSSD label texts +static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", + "car", "cat", "chair", "cow", "diningtable", "dog", "horse", + "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; + +int main(int argc, char** argv) { + using namespace std; + // Default blob path provided by Hunter private data download + // Applicable for easier example usage only + std::string nnPath(BLOB_PATH); + std::string videoPath(VIDEO_PATH); + + // If path to blob specified, use that + if(argc > 2) { + nnPath = std::string(argv[1]); + videoPath = std::string(argv[2]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + printf("Using video at path: %s\n", videoPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define source + auto xinFrame = pipeline.create(); + auto nn = pipeline.create(); + auto nnOut = pipeline.create(); + + // Properties + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + + xinFrame->setStreamName("inFrame"); + nnOut->setStreamName("nn"); + + // Create outputs + xinFrame->out.link(nn->input); + nn->out.link(nnOut->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + // Queues + auto qIn = device.getInputQueue("inFrame"); + auto qDet = device.getOutputQueue("nn", 4, false); + + + // auto to_planar = [](auto frame, auto size1) { + // cv::resize(frame, frame, cv::Size(size1, size1)); + // cv:: + // }; + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, auto frame, auto detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; + + cv::Mat frame; + cv::VideoCapture cap(videoPath); + + cv::namedWindow("inFrame", cv::WINDOW_NORMAL); + cv::resizeWindow("inFrame", 1280, 720); + std::cout << "Resize video window with mouse drag!" 
<< std::endl; + + while(cap.isOpened()) { + // Read frame from video + cap >> frame; + auto tensor = std::make_shared(); + + frame = resizeKeepAspectRatio(frame, cv::Size(300, 300), cv::Scalar(0)); + + toPlanar(frame, tensor->data); + + qIn->send(tensor); + + auto inDet = qDet->get(); + auto detections = inDet->detections; + + displayFrame("inFrame", frame, detections); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') + return 0; + } + return 0; +} \ No newline at end of file From 779e6e05ca119d21c2368ea636f4dbf57d109d90 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 29 Apr 2021 02:29:40 +0300 Subject: [PATCH 17/56] Optimization, comments --- examples/CMakeLists.txt | 86 ++++++++++++++----- examples/src/01_rgb_preview.cpp | 2 +- examples/src/02_mono_preview.cpp | 2 +- examples/src/03_depth_preview.cpp | 2 +- examples/src/04_rgb_encoding.cpp | 2 +- examples/src/05_rgb_mono_encoding.cpp | 2 +- examples/src/06_rgb_full_resolution_saver.cpp | 2 +- .../src/07_mono_full_resolution_saver.cpp | 5 +- examples/src/08_rgb_mobilenet.cpp | 6 +- examples/src/09_mono_mobilenet.cpp | 34 ++++---- examples/src/10_mono_depth_mobilenetssd.cpp | 30 ++++--- .../src/11_rgb_encoding_mono_mobilenet.cpp | 18 ++-- .../12_rgb_encoding_mono_mobilenet_depth.cpp | 24 +++--- examples/src/13_encoding_max_limit.cpp | 11 ++- examples/src/14_1_color_camera_control.cpp | 68 ++++++++------- examples/src/14_2_mono_camera_control.cpp | 41 ++++----- examples/src/14_3_depth_crop_control.cpp | 22 +++-- examples/src/15_rgb_mobilenet_4k.cpp | 17 ++-- examples/src/16_device_queue_event.cpp | 67 +++++++-------- examples/src/17_video_mobilenet.cpp | 24 +++--- examples/src/utility.cpp | 16 ++-- 21 files changed, 266 insertions(+), 215 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 6c13d1bd0..6d4f90b03 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -55,29 +55,6 @@ endmacro() # Hunter test data download -## message(STATUS "Location of test1.data: ${test1_data}") - -# Color camera preview output example -dai_add_example(01_rgb_preview src/01_rgb_preview.cpp) - -# Mono camera preview output example -dai_add_example(02_mono_preview src/02_mono_preview.cpp) - -# Depth preview output example -dai_add_example(03_depth_preview src/03_depth_preview.cpp) - -# rgb encoding -dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) - -# rgb+mono encoding -dai_add_example(05_rgb_mono_encoding src/05_rgb_mono_encoding.cpp) - -# Color camera video output example -dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) - -# Mono camera video output example -dai_add_example(28_2_mono_camera_video src/28_2_mono_camera_video.cpp) - # NeuralNetwork node, mobilenet example hunter_private_data( URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob" @@ -110,6 +87,66 @@ hunter_private_data( LOCATION mobilenet_8shaves_blob ) +# Video file with objects to detect +hunter_private_data( + URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4" + SHA1 "271d8d0b702e683ce02957db7c100843de5ceaec" + FILE "construction_vest.mp4" + LOCATION construction_vest +) + +## message(STATUS "Location of test1.data: ${test1_data}") + +# Color camera preview output example +dai_add_example(01_rgb_preview src/01_rgb_preview.cpp) + +# Mono camera preview output example +dai_add_example(02_mono_preview src/02_mono_preview.cpp) + +# Depth preview output example 
+dai_add_example(03_depth_preview src/03_depth_preview.cpp) + +# rgb encoding +dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) + +# rgb+mono encoding +dai_add_example(05_rgb_mono_encoding src/05_rgb_mono_encoding.cpp) + +dai_add_example(06_rgb_full_resolution_saver src/06_rgb_full_resolution_saver.cpp) + +dai_add_example(07_mono_full_resolution_saver src/07_mono_full_resolution_saver.cpp) + +dai_add_example(09_mono_mobilenet src/09_mono_mobilenet.cpp) +target_compile_definitions(09_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(10_mono_depth_mobilenetssd src/10_mono_depth_mobilenetssd.cpp) +target_compile_definitions(10_mono_depth_mobilenetssd PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(11_rgb_encoding_mono_mobilenet src/11_rgb_encoding_mono_mobilenet.cpp) +target_compile_definitions(11_rgb_encoding_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(12_rgb_encoding_mono_mobilenet_depth src/12_rgb_encoding_mono_mobilenet_depth.cpp) +target_compile_definitions(12_rgb_encoding_mono_mobilenet_depth PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(13_encoding_max_limit src/13_encoding_max_limit.cpp) + +dai_add_example(14_2_mono_camera_control src/14_2_mono_camera_control.cpp) + +dai_add_example(14_3_depth_crop_control src/14_3_depth_crop_control.cpp) + +dai_add_example(15_rgb_mobilenet_4k src/15_rgb_mobilenet_4k.cpp) +target_compile_definitions(15_rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_blob}") + +dai_add_example(18_rgb_encoding_mobilenet src/18_rgb_encoding_mobilenet.cpp) +target_compile_definitions(18_rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") + + +# Color camera video output example +dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) + +# Mono camera video output example +dai_add_example(28_2_mono_camera_video src/28_2_mono_camera_video.cpp) + dai_add_example(08_rgb_mobilenet src/08_rgb_mobilenet.cpp) target_compile_definitions(08_rgb_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") @@ -181,3 +218,6 @@ target_compile_definitions(29_1_object_tracker PRIVATE BLOB_PATH="${mobilenet_bl dai_add_example(29_2_spatial_object_tracker src/29_2_spatial_object_tracker.cpp) target_compile_definitions(29_2_spatial_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(17_video_mobilenet src/17_video_mobilenet.cpp) +target_compile_definitions(17_video_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}" VIDEO_PATH="${construction_vest}") + diff --git a/examples/src/01_rgb_preview.cpp b/examples/src/01_rgb_preview.cpp index 43c3bd4a4..dd68ebe0b 100644 --- a/examples/src/01_rgb_preview.cpp +++ b/examples/src/01_rgb_preview.cpp @@ -45,4 +45,4 @@ int main() { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/02_mono_preview.cpp b/examples/src/02_mono_preview.cpp index c213a543c..960471bb9 100644 --- a/examples/src/02_mono_preview.cpp +++ b/examples/src/02_mono_preview.cpp @@ -48,4 +48,4 @@ int main() { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/03_depth_preview.cpp b/examples/src/03_depth_preview.cpp index f22892c9a..402d7eeea 100644 --- a/examples/src/03_depth_preview.cpp +++ b/examples/src/03_depth_preview.cpp @@ -78,4 +78,4 @@ int main() { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/04_rgb_encoding.cpp index 54a81d0f0..b20780113 100644 --- a/examples/src/04_rgb_encoding.cpp +++ b/examples/src/04_rgb_encoding.cpp @@ -54,4 +54,4 @@ int main(int argc, 
char** argv) { std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; return 0; -} \ No newline at end of file +} diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index 31ef08dfb..4819515db 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -82,4 +82,4 @@ int main(int argc, char** argv) { std::cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << std::endl; return 0; -} \ No newline at end of file +} diff --git a/examples/src/06_rgb_full_resolution_saver.cpp b/examples/src/06_rgb_full_resolution_saver.cpp index 1b782e544..6f691a2d4 100644 --- a/examples/src/06_rgb_full_resolution_saver.cpp +++ b/examples/src/06_rgb_full_resolution_saver.cpp @@ -24,7 +24,7 @@ int main(int argc, char** argv) // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); - videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::MJPEG); + videoEnc->setDefaultProfilePreset(camRgb->getVideoSize(), camRgb->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); // Linking camRgb->video.link(xoutRgb->input); diff --git a/examples/src/07_mono_full_resolution_saver.cpp b/examples/src/07_mono_full_resolution_saver.cpp index 9645470e4..b962d2cca 100644 --- a/examples/src/07_mono_full_resolution_saver.cpp +++ b/examples/src/07_mono_full_resolution_saver.cpp @@ -32,12 +32,12 @@ int main(int argc, char** argv) device.startPipeline(); // Queue - auto q = device.getOutputQueue("right", 4, false); + auto qRight = device.getOutputQueue("right", 4, false); mkdir("07_data", 0777); while(true) { - auto inRight = q->get(); + auto inRight = qRight->get(); // Frame is transformed and ready to be shown cv::imshow("right", inRight->getCvFrame()); @@ -51,6 +51,5 @@ int main(int argc, char** argv) if(key == 'q' || key == 'Q') return 0; } - return 0; } diff --git a/examples/src/08_rgb_mobilenet.cpp b/examples/src/08_rgb_mobilenet.cpp index 07bade339..1913fe9a1 100644 --- a/examples/src/08_rgb_mobilenet.cpp +++ b/examples/src/08_rgb_mobilenet.cpp @@ -32,7 +32,7 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source and outputs + // Define sources and outputs auto camRgb = pipeline.create(); auto nn = pipeline.create(); auto xoutRgb = pipeline.create(); @@ -71,7 +71,7 @@ int main(int argc, char** argv) { auto qDet = device.getOutputQueue("nn", 4, false); // Add bounding boxes and text to the frame and show it to the user - auto displayFrame = [](std::string name, auto frame, auto detections) { + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { @@ -125,4 +125,4 @@ int main(int argc, char** argv) { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/09_mono_mobilenet.cpp index 699f93aaa..954ca92dc 100644 --- a/examples/src/09_mono_mobilenet.cpp +++ b/examples/src/09_mono_mobilenet.cpp @@ -28,41 +28,45 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRight = pipeline.create(); auto manip = pipeline.create(); + auto nn = pipeline.create(); auto manipOut = pipeline.create(); - auto 
detectionNetwork = pipeline.create(); auto nnOut = pipeline.create(); + manipOut->setStreamName("right"); + nnOut->setStreamName("detections"); + // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - detectionNetwork->setConfidenceThreshold(0.5); - detectionNetwork->setBlobPath(nnPath); - detectionNetwork->setNumInferenceThreads(2); + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + // Convert the grayscale frame into the nn-acceptable form manip->initialConfig.setResize(300, 300); + // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); - manipOut->setStreamName("right"); - nnOut->setStreamName("detections"); - // Create outputs + // Linking camRight->out.link(manip->inputImage); - manip->out.link(detectionNetwork->input); + manip->out.link(nn->input); manip->out.link(manipOut->input); - detectionNetwork->out.link(nnOut->input); + nn->out.link(nnOut->input); // Connect to device with above created pipeline dai::Device device(pipeline); // Start the pipeline device.startPipeline(); - // Queues + // Output queues will be used to get the grayscale frames and nn data from the outputs defined above auto qRight = device.getOutputQueue("right", 4, false); - auto detections = device.getOutputQueue("detections", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); // Add bounding boxes and text to the frame and show it to the user - auto displayFrame = [](std::string name, auto frame, auto detections) { + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { @@ -88,7 +92,7 @@ int main(int argc, char** argv) { while(true) { auto inRight = qRight->get(); - auto inDet = detections->get(); + auto inDet = qDet->get(); auto detections = inDet->detections; cv::Mat frame = inRight->getCvFrame(); @@ -99,4 +103,4 @@ int main(int argc, char** argv) { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/10_mono_depth_mobilenetssd.cpp index 526c9c556..f8efc1f57 100644 --- a/examples/src/10_mono_depth_mobilenetssd.cpp +++ b/examples/src/10_mono_depth_mobilenetssd.cpp @@ -30,34 +30,41 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRight = pipeline.create(); auto camLeft = pipeline.create(); auto stereo = pipeline.create(); auto manip = pipeline.create(); auto nn = pipeline.create(); - auto nnOut = pipeline.create(); + auto disparityOut = pipeline.create(); auto xoutRight = pipeline.create(); + auto nnOut = pipeline.create(); + + disparityOut->setStreamName("disparity"); + xoutRight->setStreamName("rectifiedRight"); + nnOut->setStreamName("nn"); // Properties camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - stereo->setOutputRectified(true); + // Produce the depth map (using disparity output as it's 
easier to visualize depth this way) + stereo->setOutputRectified(true); // The rectified streams are horizontally mirrored by default stereo->setConfidenceThreshold(255); - stereo->setRectifyEdgeFillColor(0); + stereo->setRectifyEdgeFillColor(0); // Black, to better see the cutout from rectification (black stripe on the edges) + // Convert the grayscale frame into the nn-acceptable form manip->initialConfig.setResize(300, 300); + // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + // Define a neural network that will make predictions based on the source frames nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); - disparityOut->setStreamName("disparity"); - xoutRight->setStreamName("rectifiedRight"); - nnOut->setStreamName("nn"); + nn->input.setBlocking(false); - // Create outputs + // Linking camRight->out.link(stereo->right); camLeft->out.link(stereo->left); stereo->rectifiedRight.link(manip->inputImage); @@ -71,13 +78,13 @@ int main(int argc, char** argv) { // Start the pipeline device.startPipeline(); - // Queues + // Output queues will be used to get the grayscale / depth frames and nn data from the outputs defined above auto qRight = device.getOutputQueue("rectifiedRight", 4, false); auto qDisparity = device.getOutputQueue("disparity", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); // Add bounding boxes and text to the frame and show it to the user - auto show = [](std::string name, auto frame, auto detections) { + auto show = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { @@ -102,6 +109,7 @@ int main(int argc, char** argv) { }; float disparity_multiplier = 255 / 95; + while(true) { auto inRight = qRight->get(); auto inDet = qDet->get(); @@ -130,4 +138,4 @@ int main(int argc, char** argv) { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/11_rgb_encoding_mono_mobilenet.cpp b/examples/src/11_rgb_encoding_mono_mobilenet.cpp index bc386f8f3..1bebf95a6 100644 --- a/examples/src/11_rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/11_rgb_encoding_mono_mobilenet.cpp @@ -28,18 +28,22 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRgb = pipeline.create(); auto camRight = pipeline.create(); auto videoEncoder = pipeline.create(); auto nn = pipeline.create(); auto manip = pipeline.create(); + auto videoOut = pipeline.create(); auto xoutRight = pipeline.create(); auto manipOut = pipeline.create(); auto nnOut = pipeline.create(); - auto videoOut = pipeline.create(); + videoOut->setStreamName("h265"); + xoutRight->setStreamName("right"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); @@ -51,17 +55,13 @@ int main(int argc, char** argv) { nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); manip->initialConfig.setResize(300, 300); - videoOut->setStreamName("h265"); - xoutRight->setStreamName("right"); - manipOut->setStreamName("manip"); - nnOut->setStreamName("nn"); - - // Create outputs + // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); camRight->out.link(manip->inputImage); @@ -147,4 +147,4 @@ int main(int argc, char** argv) { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp index 1f61dd0f3..572bcdb10 100644 --- a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp @@ -30,7 +30,7 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); auto camLeft = pipeline.create(); @@ -45,6 +45,12 @@ int main(int argc, char** argv) { auto manipOut = pipeline.create(); auto nnOut = pipeline.create(); + videoOut->setStreamName("h265"); + xoutRight->setStreamName("right"); + disparityOut->setStreamName("disparity"); + manipOut->setStreamName("manip"); + nnOut->setStreamName("nn"); + // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); @@ -54,26 +60,22 @@ int main(int argc, char** argv) { camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - depth->setConfidenceThreshold(255); + // Note: the rectified streams are horizontally mirrored by default depth->setOutputRectified(true); + depth->setConfidenceThreshold(255); depth->setRectifyMirrorFrame(false); - depth->setRectifyEdgeFillColor(0); + depth->setRectifyEdgeFillColor(0); // Black, to better see the cutout nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); // The NN model expects BGR input-> By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); manip->initialConfig.setResize(300, 300); - videoOut->setStreamName("h265"); - disparityOut->setStreamName("disparity"); - xoutRight->setStreamName("right"); - manipOut->setStreamName("manip"); - nnOut->setStreamName("nn"); - - // Create outputs + // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); camRight->out.link(xoutRight->input); @@ -195,4 +197,4 @@ int main(int argc, char** argv) { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/13_encoding_max_limit.cpp b/examples/src/13_encoding_max_limit.cpp index a1fa71232..94f34f1d3 100644 --- a/examples/src/13_encoding_max_limit.cpp +++ b/examples/src/13_encoding_max_limit.cpp @@ -30,14 +30,17 @@ int main(int argc, char** argv) { auto ve2Out = p.create(); auto ve3Out = p.create(); + ve1Out->setStreamName("ve1Out"); + ve2Out->setStreamName("ve2Out"); + ve3Out->setStreamName("ve3Out"); + // Properties colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); 
monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); - ve1Out->setStreamName("ve1Out"); - ve2Out->setStreamName("ve2Out"); - ve3Out->setStreamName("ve3Out"); + + // Setting to 26fps will trigger error ve1->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); ve2->setDefaultProfilePreset(3840, 2160, 25, dai::VideoEncoderProperties::Profile::H265_MAIN); ve3->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); @@ -82,4 +85,4 @@ int main(int argc, char** argv) { std::cout << "ffmpeg -framerate 25 -i color.h265 -c copy color.mp4" << std::endl; return 0; -} \ No newline at end of file +} diff --git a/examples/src/14_1_color_camera_control.cpp b/examples/src/14_1_color_camera_control.cpp index 5bf31c3b8..bfb41dfbc 100644 --- a/examples/src/14_1_color_camera_control.cpp +++ b/examples/src/14_1_color_camera_control.cpp @@ -12,14 +12,13 @@ */ #include #include - #include "utility.hpp" // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" // Step size ('W','A','S','D' controls) -static constexpr int STEP_SIZE = 16; +static constexpr int STEP_SIZE = 8; // Manual exposure/focus set step static constexpr int EXP_STEP = 500; // us @@ -33,52 +32,54 @@ static int clamp(int num, int v0, int v1) { int main() { dai::Pipeline pipeline; - // Nodes - auto colorCam = pipeline.create(); - auto controlIn = pipeline.create(); - auto configIn = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); auto stillEncoder = pipeline.create(); + + auto controlIn = pipeline.create(); + auto configIn = pipeline.create(); auto videoMjpegOut = pipeline.create(); auto stillMjpegOut = pipeline.create(); auto previewOut = pipeline.create(); - // Properties - colorCam->setVideoSize(640, 360); - colorCam->setPreviewSize(300, 300); controlIn->setStreamName("control"); configIn->setStreamName("config"); - videoEncoder->setDefaultProfilePreset(colorCam->getVideoSize(), colorCam->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); - stillEncoder->setDefaultProfilePreset(colorCam->getStillSize(), 1, dai::VideoEncoderProperties::Profile::MJPEG); videoMjpegOut->setStreamName("video"); stillMjpegOut->setStreamName("still"); previewOut->setStreamName("preview"); - // Link nodes - colorCam->video.link(videoEncoder->input); - colorCam->still.link(stillEncoder->input); - colorCam->preview.link(previewOut->input); - controlIn->out.link(colorCam->inputControl); - configIn->out.link(colorCam->inputConfig); + // Properties + camRgb->setVideoSize(640, 360); + camRgb->setPreviewSize(300, 300); + videoEncoder->setDefaultProfilePreset(camRgb->getVideoSize(), camRgb->getFps(), dai::VideoEncoderProperties::Profile::MJPEG); + stillEncoder->setDefaultProfilePreset(camRgb->getStillSize(), 1, dai::VideoEncoderProperties::Profile::MJPEG); + + // Linking + camRgb->video.link(videoEncoder->input); + camRgb->still.link(stillEncoder->input); + camRgb->preview.link(previewOut->input); + controlIn->out.link(camRgb->inputControl); + configIn->out.link(camRgb->inputConfig); videoEncoder->bitstream.link(videoMjpegOut->input); stillEncoder->bitstream.link(stillMjpegOut->input); // Connect to device - dai::Device dev(pipeline); + dai::Device device(pipeline); // Create data queues - auto controlQueue = dev.getInputQueue("control"); - auto configQueue = dev.getInputQueue("config"); - auto previewQueue = dev.getOutputQueue("preview"); - auto videoQueue = 
dev.getOutputQueue("video"); - auto stillQueue = dev.getOutputQueue("still"); + auto controlQueue = device.getInputQueue("control"); + auto configQueue = device.getInputQueue("config"); + auto previewQueue = device.getOutputQueue("preview"); + auto videoQueue = device.getOutputQueue("video"); + auto stillQueue = device.getOutputQueue("still"); // Start pipeline - dev.startPipeline(); + device.startPipeline(); // Max crop_x & crop_y - float max_crop_x = (colorCam->getResolutionWidth() - colorCam->getVideoWidth()) / (float)colorCam->getResolutionWidth(); - float max_crop_y = (colorCam->getResolutionHeight() - colorCam->getVideoHeight()) / (float)colorCam->getResolutionHeight(); + float max_crop_x = (camRgb->getResolutionWidth() - camRgb->getVideoWidth()) / (float)camRgb->getResolutionWidth(); + float max_crop_y = (camRgb->getResolutionHeight() - camRgb->getVideoHeight()) / (float)camRgb->getResolutionHeight(); // Default crop float crop_x = 0; @@ -120,10 +121,10 @@ int main() { cv::imshow("still", frame); } - // Update screen (10ms pooling rate) - int key = cv::waitKey(10); + // Update screen (1ms pooling rate) + int key = cv::waitKey(1); if(key == 'q') { - break; + return 0; } else if(key == 'c') { dai::CameraControl ctrl; ctrl.setCaptureStill(true); @@ -165,16 +166,16 @@ int main() { controlQueue->send(ctrl); } else if(key == 'w' || key == 'a' || key == 's' || key == 'd') { if(key == 'a') { - crop_x -= (max_crop_x / colorCam->getResolutionWidth()) * STEP_SIZE; + crop_x -= (max_crop_x / camRgb->getResolutionWidth()) * STEP_SIZE; if(crop_x < 0) crop_x = max_crop_x; } else if(key == 'd') { - crop_x += (max_crop_x / colorCam->getResolutionWidth()) * STEP_SIZE; + crop_x += (max_crop_x / camRgb->getResolutionWidth()) * STEP_SIZE; if(crop_x > max_crop_x) crop_x = 0.0f; } else if(key == 'w') { - crop_y -= (max_crop_y / colorCam->getResolutionHeight()) * STEP_SIZE; + crop_y -= (max_crop_y / camRgb->getResolutionHeight()) * STEP_SIZE; if(crop_y < 0) crop_y = max_crop_y; } else if(key == 's') { - crop_y += (max_crop_y / colorCam->getResolutionHeight()) * STEP_SIZE; + crop_y += (max_crop_y / camRgb->getResolutionHeight()) * STEP_SIZE; if(crop_y > max_crop_y) crop_y = 0.0f; } @@ -185,4 +186,5 @@ int main() { printf("Sending new crop - x: %f, y: %f\n", crop_x, crop_y); } } + return 0; } diff --git a/examples/src/14_2_mono_camera_control.cpp b/examples/src/14_2_mono_camera_control.cpp index e247f801c..0854e7de0 100644 --- a/examples/src/14_2_mono_camera_control.cpp +++ b/examples/src/14_2_mono_camera_control.cpp @@ -30,16 +30,22 @@ int main() { // Start defining a pipeline dai::Pipeline pipeline; - // Nodes + // Define sources and outputs auto camRight = pipeline.create(); auto camLeft = pipeline.create(); auto manipRight = pipeline.create(); auto manipLeft = pipeline.create(); + auto controlIn = pipeline.create(); auto configIn = pipeline.create(); auto manipOutRight = pipeline.create(); auto manipOutLeft = pipeline.create(); + controlIn->setStreamName("control"); + configIn->setStreamName("config"); + manipOutRight->setStreamName("right"); + manipOutLeft->setStreamName("left"); + // Crop range dai::Point2f topLeft(0.2, 0.2); dai::Point2f bottomRight(0.8, 0.8); @@ -49,14 +55,10 @@ int main() { camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - controlIn->setStreamName("control"); - configIn->setStreamName("config"); - 
manipOutRight->setStreamName("right"); - manipOutLeft->setStreamName("left"); manipRight->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); manipLeft->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); - // Link nodes + // Linking camRight->out.link(manipRight->inputImage); camLeft->out.link(manipLeft->inputImage); controlIn->out.link(camRight->inputControl); @@ -67,16 +69,16 @@ int main() { manipLeft->out.link(manipOutLeft->input); // Connect to device - dai::Device dev(pipeline); + dai::Device device(pipeline); // Start pipeline - dev.startPipeline(); + device.startPipeline(); - // Queues - auto qRight = dev.getOutputQueue(manipOutRight->getStreamName(), 4, false); - auto qLeft = dev.getOutputQueue(manipOutLeft->getStreamName(), 4, false); - auto controlQueue = dev.getInputQueue(controlIn->getStreamName()); - auto configQueue = dev.getInputQueue(configIn->getStreamName()); + // Output queues will be used to get the grayscale frames + auto qRight = device.getOutputQueue(manipOutRight->getStreamName(), 4, false); + auto qLeft = device.getOutputQueue(manipOutLeft->getStreamName(), 4, false); + auto controlQueue = device.getInputQueue(controlIn->getStreamName()); + auto configQueue = device.getInputQueue(configIn->getStreamName()); // Defaults and limits for manual focus/exposure controls int exp_time = 20000; @@ -90,13 +92,11 @@ int main() { while(true) { auto inRight = qRight->get(); auto inLeft = qLeft->get(); - cv::Mat frameRight = inRight->getCvFrame(); - cv::Mat frameLeft = inLeft->getCvFrame(); - cv::imshow("right", frameRight); - cv::imshow("left", frameLeft); + cv::imshow("right", inRight->getCvFrame()); + cv::imshow("left", inLeft->getCvFrame()); - // Update screen (10ms pooling rate) - int key = cv::waitKey(10); + // Update screen (1ms pooling rate) + int key = cv::waitKey(1); if(key == 'q') { break; } else if(key == 'e') { @@ -149,4 +149,5 @@ int main() { sendCamConfig = false; } } -} \ No newline at end of file + return 0; +} diff --git a/examples/src/14_3_depth_crop_control.cpp b/examples/src/14_3_depth_crop_control.cpp index 36fe779c4..57c0749a2 100644 --- a/examples/src/14_3_depth_crop_control.cpp +++ b/examples/src/14_3_depth_crop_control.cpp @@ -17,14 +17,18 @@ int main() { // Start defining a pipeline dai::Pipeline pipeline; - // Nodes + // Define sources and outputs auto camRight = pipeline.create(); auto camLeft = pipeline.create(); auto manip = pipeline.create(); auto stereo = pipeline.create(); + auto configIn = pipeline.create(); auto xout = pipeline.create(); + configIn->setStreamName("config"); + xout->setStreamName("depth"); + // Crop range dai::Point2f topLeft(0.2, 0.2); dai::Point2f bottomRight(0.8, 0.8); @@ -34,14 +38,13 @@ int main() { camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - configIn->setStreamName("config"); - xout->setStreamName("depth"); + manip->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); manip->setMaxOutputFrameSize(camRight->getResolutionHeight() * camRight->getResolutionWidth() * 3); stereo->setConfidenceThreshold(200); stereo->setOutputDepth(true); - // Link nodes + // Linking configIn->out.link(manip->inputConfig); stereo->depth.link(manip->inputImage); manip->out.link(xout->input); @@ -49,14 +52,14 @@ int main() { camLeft->out.link(stereo->right); // Connect to device - dai::Device 
dev(pipeline); + dai::Device device(pipeline); // Start pipeline - dev.startPipeline(); + device.startPipeline(); // Queues - auto q = dev.getOutputQueue(xout->getStreamName(), 4, false); - auto configQueue = dev.getInputQueue(configIn->getStreamName()); + auto q = device.getOutputQueue(xout->getStreamName(), 4, false); + auto configQueue = device.getInputQueue(configIn->getStreamName()); while(true) { auto inDepth = q->get(); @@ -108,4 +111,5 @@ int main() { sendCamConfig = false; } } -} \ No newline at end of file + return 0; +} diff --git a/examples/src/15_rgb_mobilenet_4k.cpp b/examples/src/15_rgb_mobilenet_4k.cpp index 43a122453..318599a3e 100644 --- a/examples/src/15_rgb_mobilenet_4k.cpp +++ b/examples/src/15_rgb_mobilenet_4k.cpp @@ -28,12 +28,17 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRgb = pipeline.create(); auto nn = pipeline.create(); - auto nnOut = pipeline.create(); + auto xoutVideo = pipeline.create(); auto xoutPreview = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutVideo->setStreamName("video"); + xoutPreview->setStreamName("preview"); + nnOut->setStreamName("nn"); // Properties camRgb->setPreviewSize(300, 300); // NN input @@ -46,11 +51,7 @@ int main(int argc, char** argv) { nn->setNumInferenceThreads(2); nn->input.setBlocking(false); - xoutVideo->setStreamName("video"); - xoutPreview->setStreamName("preview"); - nnOut->setStreamName("nn"); - - // Create outputs + // Linking camRgb->video.link(xoutVideo->input); camRgb->preview.link(xoutPreview->input); camRgb->preview.link(nn->input); @@ -111,4 +112,4 @@ int main(int argc, char** argv) { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/16_device_queue_event.cpp b/examples/src/16_device_queue_event.cpp index aca4edd6e..967f239c1 100644 --- a/examples/src/16_device_queue_event.cpp +++ b/examples/src/16_device_queue_event.cpp @@ -1,7 +1,6 @@ #include #include - #include "utility.hpp" // Inludes common necessary includes for development using depthai library @@ -9,58 +8,50 @@ int main(int argc, char** argv) { using namespace std; - using namespace std::chrono; - - dai::Pipeline p; - auto colorCam = p.create(); - auto xout = p.create(); - auto xout2 = p.create(); - auto videnc = p.create(); + // Create pipeline + dai::Pipeline pipeline; - // XLinkOut - xout->setStreamName("mjpeg"); - xout2->setStreamName("preview"); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto camMono = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto xoutMono = pipeline.create(); - // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); + xoutRgb->setStreamName("rgb"); + xoutMono->setStreamName("mono"); - // VideoEncoder - videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::MJPEG); + // Properties + camRgb->setInterleaved(true); + camRgb->setPreviewSize(300, 300); - // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); - videnc->bitstream.link(xout->input); + // Linking + camRgb->preview.link(xoutRgb->input); + camMono->out.link(xoutMono->input); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - // Sets queues size and behaviour 
- d.getOutputQueue("mjpeg", 8, false); - d.getOutputQueue("preview", 8, false); + // Clear queue events + device.getQueueEvents(); - while(1) { - auto ev = d.getQueueEvent(); - if(ev == "preview") { - auto preview = d.getOutputQueue(ev)->get(); - cv::imshow("preview", cv::Mat(preview->getHeight(), preview->getWidth(), CV_8UC3, preview->getData().data())); - } else if(ev == "mjpeg") { - auto mjpeg = d.getOutputQueue(ev)->get(); - cv::Mat decodedFrame = cv::imdecode(cv::Mat(mjpeg->getData()), cv::IMREAD_COLOR); - cv::imshow("mjpeg", decodedFrame); + while(true) { + auto ev = device.getQueueEvent(); + + if(ev == "rgb") { + auto rgb = device.getOutputQueue(ev)->get(); + cv::imshow("rgb", rgb->getFrame()); + } else if(ev == "mono") { + auto mono = device.getOutputQueue(ev)->get(); + cv::imshow("mono", mono->getFrame()); } int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } diff --git a/examples/src/17_video_mobilenet.cpp b/examples/src/17_video_mobilenet.cpp index 2e38b2891..0d3f9c984 100644 --- a/examples/src/17_video_mobilenet.cpp +++ b/examples/src/17_video_mobilenet.cpp @@ -31,20 +31,21 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source - auto xinFrame = pipeline.create(); + // Define source and outputs auto nn = pipeline.create(); + + auto xinFrame = pipeline.create(); auto nnOut = pipeline.create(); + xinFrame->setStreamName("inFrame"); + nnOut->setStreamName("nn"); + // Properties nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); nn->input.setBlocking(false); - xinFrame->setStreamName("inFrame"); - nnOut->setStreamName("nn"); - // Create outputs xinFrame->out.link(nn->input); nn->out.link(nnOut->input); @@ -54,18 +55,13 @@ int main(int argc, char** argv) { // Start the pipeline device.startPipeline(); - // Queues + // Input queue will be used to send video frames to the device. auto qIn = device.getInputQueue("inFrame"); + // Output queue will be used to get nn data from the video frames. 
auto qDet = device.getOutputQueue("nn", 4, false); - - // auto to_planar = [](auto frame, auto size1) { - // cv::resize(frame, frame, cv::Size(size1, size1)); - // cv:: - // }; - // Add bounding boxes and text to the frame and show it to the user - auto displayFrame = [](std::string name, auto frame, auto detections) { + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { @@ -117,4 +113,4 @@ int main(int argc, char** argv) { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/utility.cpp b/examples/src/utility.cpp index 64cfe80df..cc7ff6c79 100644 --- a/examples/src/utility.cpp +++ b/examples/src/utility.cpp @@ -4,7 +4,7 @@ #include "fp16/fp16.h" cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, int bpp){ - + cv::Mat frame; if(numPlanes == 3){ @@ -15,15 +15,15 @@ cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, in uint8_t b = data.data()[i + w*h * 0]; frame.data[i*3+0] = b; } - for(int i = 0; i < w*h; i++) { - uint8_t g = data.data()[i + w*h * 1]; + for(int i = 0; i < w*h; i++) { + uint8_t g = data.data()[i + w*h * 1]; frame.data[i*3+1] = g; } for(int i = 0; i < w*h; i++) { uint8_t r = data.data()[i + w*h * 2]; frame.data[i*3+2] = r; } - + } else { if(bpp == 3){ frame = cv::Mat(h, w, CV_8UC3); @@ -31,26 +31,26 @@ cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, in uint8_t b,g,r; b = data.data()[i + 2]; g = data.data()[i + 1]; - r = data.data()[i + 0]; + r = data.data()[i + 0]; frame.at( (i/bpp) / w, (i/bpp) % w) = cv::Vec3b(b,g,r); } } else if(bpp == 6) { //first denormalize //dump - + frame = cv::Mat(h, w, CV_8UC3); for(int y = 0; y < h; y++){ for(int x = 0; x < w; x++){ - const uint16_t* fp16 = (const uint16_t*) (data.data() + (y*w+x)*bpp); + const uint16_t* fp16 = (const uint16_t*) (data.data() + (y*w+x)*bpp); uint8_t r = (uint8_t) (fp16_ieee_to_fp32_value(fp16[0]) * 255.0f); uint8_t g = (uint8_t) (fp16_ieee_to_fp32_value(fp16[1]) * 255.0f); uint8_t b = (uint8_t) (fp16_ieee_to_fp32_value(fp16[2]) * 255.0f); frame.at(y, x) = cv::Vec3b(b,g,r); } } - + } } From fbcb74ecd6cdc563819d483598f808e96c1f1660 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 29 Apr 2021 16:28:57 +0300 Subject: [PATCH 18/56] Pipeline: add custom camera tuning blob option --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/pipeline/Pipeline.hpp | 6 ++++++ shared/depthai-shared | 2 +- src/pipeline/Pipeline.cpp | 19 +++++++++++++++++++ 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 0020832ed..79afcbd39 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "3b51648b0d96c9391d878eede0fd5fd9b44084ca") +set(DEPTHAI_DEVICE_SIDE_COMMIT "3c55ad5aece06a391527d82ae001469cd10c8ed5") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/pipeline/Pipeline.hpp b/include/depthai/pipeline/Pipeline.hpp index 022491135..144e74b9d 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -30,6 +30,7 @@ class PipelineImpl { 
PipelineSchema getPipelineSchema() const; OpenVINO::Version getPipelineOpenVINOVersion() const; AssetManager getAllAssets() const; + void setCameraTuningBlobPath(const std::string& path); // Access to nodes std::vector> getAllNodes() const; @@ -206,6 +207,11 @@ class Pipeline { void setOpenVINOVersion(OpenVINO::Version version) { impl()->forceRequiredOpenVINOVersion = version; } + + /// Set a camera IQ (Image Quality) tuning blob, used for all cameras + void setCameraTuningBlobPath(const std::string& path) { + impl()->setCameraTuningBlobPath(path); + } }; } // namespace dai diff --git a/shared/depthai-shared b/shared/depthai-shared index 7f55bf769..ab341a57f 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit 7f55bf7692a327215b501abf7458a1f439ff6ab8 +Subproject commit ab341a57f9b669ccd1820b40cb827e528086c81e diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index 18e637e46..d0ec0f10e 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -4,6 +4,7 @@ // std #include +#include // libraries #include "spdlog/fmt/fmt.h" @@ -245,6 +246,24 @@ OpenVINO::Version PipelineImpl::getPipelineOpenVINOVersion() const { return openvinoVersion; } +void PipelineImpl::setCameraTuningBlobPath(const std::string& path) { + std::string assetKey = "camTuning"; + + std::ifstream blobStream(path, std::ios::in | std::ios::binary); + if(!blobStream.is_open()) { + throw std::runtime_error("Pipeline | Couldn't open camera tuning blob at path: " + path); + } + + Asset blobAsset; + blobAsset.alignment = 64; + blobAsset.data = std::vector(std::istreambuf_iterator(blobStream), {}); + + assetManager.set(assetKey, blobAsset); + + globalProperties.cameraTuningBlobUri = std::string("asset:") + assetKey; + globalProperties.cameraTuningBlobSize = blobAsset.data.size(); +} + // Remove node capability void PipelineImpl::remove(std::shared_ptr toRemove) { // Search for this node in 'nodes' vector. From 45c5104633213ff055609307b1014fe53771d6fc Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Fri, 30 Apr 2021 01:41:08 +0300 Subject: [PATCH 19/56] Optimization, comments. 
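A minimal usage sketch for the Pipeline::setCameraTuningBlobPath() API added in the previous patch, assuming a host-side tuning file exists; the file name "tuning_color.bin" below is only an illustrative placeholder, not a file shipped with the library:

    #include "depthai/depthai.hpp"

    int main() {
        dai::Pipeline pipeline;

        // Register a custom camera IQ tuning blob; it is packed as a pipeline asset
        // and applied to all cameras on the device ("tuning_color.bin" is a placeholder path).
        pipeline.setCameraTuningBlobPath("tuning_color.bin");

        auto camRgb = pipeline.create<dai::node::ColorCamera>();
        auto xout = pipeline.create<dai::node::XLinkOut>();
        xout->setStreamName("preview");
        camRgb->preview.link(xout->input);

        dai::Device device(pipeline);
        device.startPipeline();

        auto preview = device.getOutputQueue("preview", 4, false);
        while(true) {
            // Frames are captured with the custom tuning applied
            auto frame = preview->get<dai::ImgFrame>();
        }
        return 0;
    }
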
--- examples/CMakeLists.txt | 4 - examples/src/02_mono_preview.cpp | 16 +- examples/src/03_depth_preview.cpp | 16 +- examples/src/04_h264_encoding.cpp | 72 --------- examples/src/05_rgb_mono_encoding.cpp | 12 +- .../src/07_mono_full_resolution_saver.cpp | 8 +- examples/src/09_mono_mobilenet.cpp | 12 +- examples/src/10_mono_depth_mobilenetssd.cpp | 18 +-- .../src/11_rgb_encoding_mono_mobilenet.cpp | 22 +-- .../12_rgb_encoding_mono_mobilenet_depth.cpp | 38 ++--- examples/src/14_2_mono_camera_control.cpp | 20 +-- examples/src/14_3_depth_crop_control.cpp | 18 +-- examples/src/18_rgb_encoding_mobilenet.cpp | 21 +-- examples/src/20_image_manip_warp.cpp | 92 +++++------ .../src/21_mobilenet_device_side_decoding.cpp | 126 --------------- ...22_1_tiny_yolo_v3_device_side_decoding.cpp | 141 ++++++++--------- ...22_2_tiny_yolo_v4_device_side_decoding.cpp | 146 +++++++++--------- examples/src/24_opencv_support.cpp | 63 ++++---- examples/src/25_system_information.cpp | 61 +++----- examples/src/26_1_spatial_mobilenet.cpp | 135 ++++++++-------- 20 files changed, 414 insertions(+), 627 deletions(-) delete mode 100644 examples/src/04_h264_encoding.cpp delete mode 100644 examples/src/21_mobilenet_device_side_decoding.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 6d4f90b03..b2d2f44f3 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -176,10 +176,6 @@ dai_add_example(16_device_queue_event src/16_device_queue_event.cpp) # OpenCV support example dai_add_example(24_opencv_support src/24_opencv_support.cpp) -# Device side decoding example for mobilenet-ssd -dai_add_example(21_mobilenet_device_side_decoding src/21_mobilenet_device_side_decoding.cpp) -target_compile_definitions(21_mobilenet_device_side_decoding PRIVATE BLOB_PATH="${mobilenet_blob}") - # Device side decoding example for mobilenet-ssd with 3d coordinates on RGB camera dai_add_example(26_1_spatial_mobilenet src/26_1_spatial_mobilenet.cpp) target_compile_definitions(26_1_spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/02_mono_preview.cpp b/examples/src/02_mono_preview.cpp index 960471bb9..7e2e7c801 100644 --- a/examples/src/02_mono_preview.cpp +++ b/examples/src/02_mono_preview.cpp @@ -10,8 +10,8 @@ int main() { dai::Pipeline pipeline; // Define sources and outputs - auto camLeft = pipeline.create(); - auto camRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); auto xoutLeft = pipeline.create(); auto xoutRight = pipeline.create(); @@ -19,14 +19,14 @@ int main() { xoutRight->setStreamName("right"); // Properties - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); // Linking - camRight->out.link(xoutRight->input); - camLeft->out.link(xoutLeft->input); + monoRight->out.link(xoutRight->input); + monoLeft->out.link(xoutLeft->input); dai::Device device(pipeline); device.startPipeline(); diff --git a/examples/src/03_depth_preview.cpp b/examples/src/03_depth_preview.cpp index 402d7eeea..ccef0dd93 100644 --- 
a/examples/src/03_depth_preview.cpp +++ b/examples/src/03_depth_preview.cpp @@ -16,17 +16,17 @@ int main() { dai::Pipeline pipeline; - auto camLeft = pipeline.create(); - auto camRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); auto depth = pipeline.create(); auto xout = pipeline.create(); xout->setStreamName("disparity"); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth->setConfidenceThreshold(200); @@ -49,8 +49,8 @@ int main() { float multiplier = 255 / max_disparity; // Linking - camLeft->out.link(depth->left); - camRight->out.link(depth->right); + monoLeft->out.link(depth->left); + monoRight->out.link(depth->right); depth->disparity.link(xout->input); // Pipeline is defined, now we can connect to the device diff --git a/examples/src/04_h264_encoding.cpp b/examples/src/04_h264_encoding.cpp deleted file mode 100644 index d366a4c0e..000000000 --- a/examples/src/04_h264_encoding.cpp +++ /dev/null @@ -1,72 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - - std::string h264Path("video.h264"); - - // If path specified, use that - if(argc > 1) { - h264Path = std::string(argv[1]); - } - - dai::Pipeline p; - - auto colorCam = p.create(); - auto xout = p.create(); - auto xout2 = p.create(); - auto videnc = p.create(); - - // XLinkOut - xout->setStreamName("h264"); - xout2->setStreamName("preview"); - - // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); - - // VideoEncoder - videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); - - // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); - videnc->bitstream.link(xout->input); - - // Connect to device with above created pipeline - dai::Device d(p); - // Start the pipeline - d.startPipeline(); - - auto myfile = std::fstream(h264Path, std::ios::out | std::ios::binary); - - auto h264Queue = d.getOutputQueue("h264", 8, false); - auto previewQueue = d.getOutputQueue("preview", 8, false); - while(1) { - auto preview = previewQueue->get(); - cv::imshow("preview", cv::Mat(preview->getHeight(), preview->getWidth(), CV_8UC3, preview->getData().data())); - auto h264 = h264Queue->get(); - myfile.write((char*)h264->getData().data(), h264->getData().size()); - - int key = cv::waitKey(1); - if(key == 'q') { - break; - } - } - myfile.close(); - - std::cout << "To view the encoded data, convert the stream file " << h264Path << " into a video file (.mp4) using a command below:" << std::endl; - std::cout << "ffmpeg -framerate " << 
colorCam->getFps() << " -i " << h264Path << " -c copy video.mp4" << std::endl; - - return 0; -} diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/05_rgb_mono_encoding.cpp index 4819515db..3039ce9b4 100644 --- a/examples/src/05_rgb_mono_encoding.cpp +++ b/examples/src/05_rgb_mono_encoding.cpp @@ -20,8 +20,8 @@ int main(int argc, char** argv) { // Define sources and outputs auto camRgb = pipeline.create(); - auto camLeft = pipeline.create(); - auto camRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); auto ve1 = pipeline.create(); auto ve2 = pipeline.create(); auto ve3 = pipeline.create(); @@ -36,17 +36,17 @@ int main(int argc, char** argv) { // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding ve1->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); ve2->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); ve3->setDefaultProfilePreset(1280, 720, 30, dai::VideoEncoderProperties::Profile::H264_MAIN); // Linking - camLeft->out.link(ve1->input); + monoLeft->out.link(ve1->input); camRgb->video.link(ve2->input); - camRight->out.link(ve3->input); + monoRight->out.link(ve3->input); ve1->bitstream.link(ve1Out->input); ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); diff --git a/examples/src/07_mono_full_resolution_saver.cpp b/examples/src/07_mono_full_resolution_saver.cpp index b962d2cca..12ae13a09 100644 --- a/examples/src/07_mono_full_resolution_saver.cpp +++ b/examples/src/07_mono_full_resolution_saver.cpp @@ -14,17 +14,17 @@ int main(int argc, char** argv) dai::Pipeline pipeline; // Define sources and outputs - auto camRight = pipeline.create(); + auto monoRight = pipeline.create(); auto xoutRight = pipeline.create(); xoutRight->setStreamName("right"); // Properties - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); // Linking - camRight->out.link(xoutRight->input); + monoRight->out.link(xoutRight->input); // Pipeline is defined, now we can connect to the device dai::Device device(pipeline); diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/09_mono_mobilenet.cpp index 954ca92dc..dc09ff7eb 100644 --- a/examples/src/09_mono_mobilenet.cpp +++ b/examples/src/09_mono_mobilenet.cpp @@ -29,18 +29,18 @@ int main(int argc, char** argv) { dai::Pipeline pipeline; // Define sources and outputs - auto camRight = pipeline.create(); + auto monoRight = pipeline.create(); auto manip = pipeline.create(); auto nn = pipeline.create(); auto manipOut = pipeline.create(); auto nnOut = pipeline.create(); manipOut->setStreamName("right"); - nnOut->setStreamName("detections"); + nnOut->setStreamName("nn"); // Properties - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + 
monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); nn->setNumInferenceThreads(2); @@ -48,10 +48,10 @@ int main(int argc, char** argv) { // Convert the grayscale frame into the nn-acceptable form manip->initialConfig.setResize(300, 300); // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) - manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); // Linking - camRight->out.link(manip->inputImage); + monoRight->out.link(manip->inputImage); manip->out.link(nn->input); manip->out.link(manipOut->input); nn->out.link(nnOut->input); diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/10_mono_depth_mobilenetssd.cpp index f8efc1f57..2d168cdfd 100644 --- a/examples/src/10_mono_depth_mobilenetssd.cpp +++ b/examples/src/10_mono_depth_mobilenetssd.cpp @@ -31,8 +31,8 @@ int main(int argc, char** argv) { dai::Pipeline pipeline; // Define sources and outputs - auto camRight = pipeline.create(); - auto camLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); auto stereo = pipeline.create(); auto manip = pipeline.create(); auto nn = pipeline.create(); @@ -46,10 +46,10 @@ int main(int argc, char** argv) { nnOut->setStreamName("nn"); // Properties - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); // Produce the depth map (using disparity output as it's easier to visualize depth this way) stereo->setOutputRectified(true); // The rectified streams are horizontally mirrored by default stereo->setConfidenceThreshold(255); @@ -57,7 +57,7 @@ int main(int argc, char** argv) { // Convert the grayscale frame into the nn-acceptable form manip->initialConfig.setResize(300, 300); // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) - manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); // Define a neural network that will make predictions based on the source frames nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); @@ -65,8 +65,8 @@ int main(int argc, char** argv) { nn->input.setBlocking(false); // Linking - camRight->out.link(stereo->right); - camLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); + monoLeft->out.link(stereo->left); stereo->rectifiedRight.link(manip->inputImage); stereo->disparity.link(disparityOut->input); manip->out.link(nn->input); diff --git a/examples/src/11_rgb_encoding_mono_mobilenet.cpp b/examples/src/11_rgb_encoding_mono_mobilenet.cpp index 1bebf95a6..554e1631b 100644 --- a/examples/src/11_rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/11_rgb_encoding_mono_mobilenet.cpp @@ -30,7 +30,7 @@ int main(int argc, char** argv) { // Define sources and outputs auto camRgb = pipeline.create(); - auto camRight = pipeline.create(); + auto monoRight = pipeline.create(); auto videoEncoder = pipeline.create(); auto nn = pipeline.create(); auto manip = pipeline.create(); @@ -48,8 +48,8 @@ int main(int argc, char** argv) { // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); nn->setConfidenceThreshold(0.5); @@ -58,15 +58,15 @@ int main(int argc, char** argv) { nn->input.setBlocking(false); // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) - manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); manip->initialConfig.setResize(300, 300); // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); - camRight->out.link(manip->inputImage); + monoRight->out.link(manip->inputImage); manip->out.link(nn->input); - camRight->out.link(xoutRight->input); + monoRight->out.link(xoutRight->input); manip->out.link(manipOut->input); nn->out.link(nnOut->input); @@ -97,13 +97,13 @@ int main(int argc, char** argv) { auto out1 = qRgbEnc->get(); videoFile.write((char*)out1->getData().data(), out1->getData().size()); - int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; auto color = cv::Scalar(255, 0, 0); for(auto& detection : detections) { - int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * camRight->getResolutionHeight(); - int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * camRight->getResolutionHeight(); + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); diff --git a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp index 572bcdb10..5c26de6f4 100644 --- a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp @@ -33,8 +33,8 @@ int main(int argc, char** argv) { // Define sources and outputs auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); - auto camLeft = pipeline.create(); - auto camRight = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); auto depth = pipeline.create(); auto nn = pipeline.create(); auto manip = pipeline.create(); @@ -55,10 +55,10 @@ int main(int argc, char** argv) { camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); // Note: the rectified streams are horizontally mirrored by default depth->setOutputRectified(true); @@ -72,15 +72,15 @@ int main(int argc, char** argv) { nn->input.setBlocking(false); // The NN model expects BGR input-> By default ImageManip output type would be same as input (gray in this case) - manip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + 
manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); manip->initialConfig.setResize(300, 300); // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); - camRight->out.link(xoutRight->input); - camRight->out.link(depth->right); - camLeft->out.link(depth->left); + monoRight->out.link(xoutRight->input); + monoRight->out.link(depth->right); + monoLeft->out.link(depth->left); depth->disparity.link(disparityOut->input); depth->rectifiedRight.link(manip->inputImage); manip->out.link(nn->input); @@ -127,13 +127,13 @@ int main(int argc, char** argv) { frameDisparity.convertTo(frameDisparity, CV_8UC1, disparity_multiplier); cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); - int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; auto color = cv::Scalar(255, 0, 0); for(auto& detection : detections) { - int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * camRight->getResolutionHeight(); - int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * camRight->getResolutionHeight(); + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); @@ -150,10 +150,10 @@ int main(int argc, char** argv) { cv::imshow("right", frame); for(auto& detection : detections) { - int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * camRight->getResolutionHeight(); - int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * camRight->getResolutionHeight(); + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); diff --git a/examples/src/14_2_mono_camera_control.cpp b/examples/src/14_2_mono_camera_control.cpp index 0854e7de0..19958e1f3 100644 --- a/examples/src/14_2_mono_camera_control.cpp +++ b/examples/src/14_2_mono_camera_control.cpp @@ -31,8 +31,8 @@ int main() { dai::Pipeline pipeline; // Define sources and outputs - auto camRight = pipeline.create(); - auto camLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); auto manipRight = pipeline.create(); auto manipLeft = pipeline.create(); @@ -51,18 +51,18 @@ int main() { dai::Point2f bottomRight(0.8, 0.8); // Properties - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); 
manipRight->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); manipLeft->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); // Linking - camRight->out.link(manipRight->inputImage); - camLeft->out.link(manipLeft->inputImage); - controlIn->out.link(camRight->inputControl); - controlIn->out.link(camLeft->inputControl); + monoRight->out.link(manipRight->inputImage); + monoLeft->out.link(manipLeft->inputImage); + controlIn->out.link(monoRight->inputControl); + controlIn->out.link(monoLeft->inputControl); configIn->out.link(manipRight->inputConfig); configIn->out.link(manipLeft->inputConfig); manipRight->out.link(manipOutRight->input); diff --git a/examples/src/14_3_depth_crop_control.cpp b/examples/src/14_3_depth_crop_control.cpp index 57c0749a2..31e1d94b4 100644 --- a/examples/src/14_3_depth_crop_control.cpp +++ b/examples/src/14_3_depth_crop_control.cpp @@ -18,8 +18,8 @@ int main() { dai::Pipeline pipeline; // Define sources and outputs - auto camRight = pipeline.create(); - auto camLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto monoLeft = pipeline.create(); auto manip = pipeline.create(); auto stereo = pipeline.create(); @@ -34,13 +34,13 @@ int main() { dai::Point2f bottomRight(0.8, 0.8); // Properties - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); - camLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); manip->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); - manip->setMaxOutputFrameSize(camRight->getResolutionHeight() * camRight->getResolutionWidth() * 3); + manip->setMaxOutputFrameSize(monoRight->getResolutionHeight() * monoRight->getResolutionWidth() * 3); stereo->setConfidenceThreshold(200); stereo->setOutputDepth(true); @@ -48,8 +48,8 @@ int main() { configIn->out.link(manip->inputConfig); stereo->depth.link(manip->inputImage); manip->out.link(xout->input); - camRight->out.link(stereo->left); - camLeft->out.link(stereo->right); + monoRight->out.link(stereo->left); + monoLeft->out.link(stereo->right); // Connect to device dai::Device device(pipeline); diff --git a/examples/src/18_rgb_encoding_mobilenet.cpp b/examples/src/18_rgb_encoding_mobilenet.cpp index c5af21218..c6087ea0a 100644 --- a/examples/src/18_rgb_encoding_mobilenet.cpp +++ b/examples/src/18_rgb_encoding_mobilenet.cpp @@ -29,14 +29,19 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define source + // Define sources and outputs auto camRgb = pipeline.create(); - auto xoutRgb = pipeline.create(); auto videoEncoder = pipeline.create(); - auto videoOut = pipeline.create(); auto nn = pipeline.create(); + + auto xoutRgb = pipeline.create(); + auto videoOut = pipeline.create(); auto nnOut = pipeline.create(); + xoutRgb->setStreamName("rgb"); + videoOut->setStreamName("h265"); + nnOut->setStreamName("nn"); + // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); @@ -50,11 +55,7 @@ int main(int argc, char** argv) { 
nn->setNumInferenceThreads(2); nn->input.setBlocking(false); - videoOut->setStreamName("h265"); - xoutRgb->setStreamName("rgb"); - nnOut->setStreamName("nn"); - - // Create outputs + // Linking camRgb->video.link(videoEncoder->input); camRgb->preview.link(xoutRgb->input); camRgb->preview.link(nn->input); @@ -73,7 +74,7 @@ int main(int argc, char** argv) { auto qRgbEnc = device.getOutputQueue("h265", 30, true); // Add bounding boxes and text to the frame and show it to the user - auto displayFrame = [](std::string name, auto frame, auto detections) { + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { @@ -118,4 +119,4 @@ int main(int argc, char** argv) { } } return 0; -} \ No newline at end of file +} diff --git a/examples/src/20_image_manip_warp.cpp b/examples/src/20_image_manip_warp.cpp index 886b872aa..60f54ba23 100644 --- a/examples/src/20_image_manip_warp.cpp +++ b/examples/src/20_image_manip_warp.cpp @@ -4,21 +4,26 @@ #include "depthai/depthai.hpp" #include "utility.hpp" -struct warpFourPointTest { - std::vector points; - bool normalizedCoords; - const char* description; -}; +static constexpr auto keyRotateDecr = 'z'; +static constexpr auto keyRotateIncr = 'x'; +static constexpr auto keyResizeInc = 'v'; +static constexpr auto keyWarpTestCycle = 'c'; + +void printControls() { + printf("\n=== Controls:\n"); + printf(" %c -rotated rectangle crop, decrease rate\n", keyRotateDecr); + printf(" %c -rotated rectangle crop, increase rate\n", keyRotateIncr); + printf(" %c -warp 4-point transform, cycle through modes\n", keyWarpTestCycle); + printf(" %c -resize cropped region, or disable resize\n", keyResizeInc); + printf(" h -print controls (help)\n"); +} static constexpr auto ROTATE_RATE_MAX = 5.0; static constexpr auto ROTATE_RATE_INC = 0.1; -static constexpr auto KEY_ROTATE_DECR = 'z'; -static constexpr auto KEY_ROTATE_INCR = 'x'; static constexpr auto RESIZE_MAX_W = 800; static constexpr auto RESIZE_MAX_H = 600; static constexpr auto RESIZE_FACTOR_MAX = 5; -static constexpr auto KEY_RESIZE_INC = 'v'; /* The crop points are specified in clockwise order, * with first point mapped to output top-left, as: @@ -30,6 +35,11 @@ static const dai::Point2f P0 = {0, 0}; // top-left static const dai::Point2f P1 = {1, 0}; // top-right static const dai::Point2f P2 = {1, 1}; // bottom-right static const dai::Point2f P3 = {0, 1}; // bottom-left +struct warpFourPointTest { + std::vector points; + bool normalizedCoords; + const char* description; +}; std::vector warpList = { //{{{ 0, 0},{ 1, 0},{ 1, 1},{ 0, 1}}, true, "passthrough"}, @@ -44,42 +54,32 @@ std::vector warpList = { {{{-0.3, 0}, {1, 0}, {1.3, 1}, {0, 1}}, true, "8. parallelogram transform"}, {{{-0.2, 0}, {1.8, 0}, {1, 1}, {0, 1}}, true, "9. 
trapezoid transform"}, }; -static constexpr auto KEY_WARP_TEST_CYCLE = 'c'; - -void printControls() { - printf("\n=== Controls:\n"); - printf(" %c -rotated rectangle crop, decrease rate\n", KEY_ROTATE_DECR); - printf(" %c -rotated rectangle crop, increase rate\n", KEY_ROTATE_INCR); - printf(" %c -warp 4-point transform, cycle through modes\n", KEY_WARP_TEST_CYCLE); - printf(" %c -resize cropped region, or disable resize\n", KEY_RESIZE_INC); - printf(" h -print controls (help)\n"); -} int main() { dai::Pipeline pipeline; - auto colorCam = pipeline.create(); - auto camOut = pipeline.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); auto manip = pipeline.create(); - auto manipCfg = pipeline.create(); + + auto camOut = pipeline.create(); auto manipOut = pipeline.create(); + auto manipCfg = pipeline.create(); camOut->setStreamName("preview"); manipOut->setStreamName("manip"); manipCfg->setStreamName("manipCfg"); - colorCam->setPreviewSize(640, 480); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(640, 480); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); manip->setMaxOutputFrameSize(2000 * 1500 * 3); - /* Link nodes: CAM --> XLINK(preview) - * \--> manip -> XLINK(manipOut) - * manipCfg ---/ - */ - colorCam->preview.link(camOut->input); - colorCam->preview.link(manip->inputImage); + // Linking + camRgb->preview.link(camOut->input); + camRgb->preview.link(manip->inputImage); manip->out.link(manipOut->input); manipCfg->out.link(manip->inputConfig); @@ -88,17 +88,19 @@ int main() { device.startPipeline(); // Create input & output queues - auto previewQueue = device.getOutputQueue("preview", 8, false); - auto manipQueue = device.getOutputQueue("manip", 8, false); - auto manipInQueue = device.getInputQueue("manipCfg"); + auto qPreview = device.getOutputQueue("preview", 8, false); + auto qManip = device.getOutputQueue("manip", 8, false); + auto qManipCfg = device.getInputQueue("manipCfg"); - std::vector frameQueues{previewQueue, manipQueue}; + std::vector frameQueues{qPreview, qManip}; // keep processing data int key = -1; float angleDeg = 0; float rotateRate = 1.0; - int resizeFactor = 0, resizeX, resizeY; + int resizeFactor = 0; + int resizeX = 0; + int resizeY = 0; bool testFourPt = false; int warpIdx = -1; @@ -107,15 +109,15 @@ int main() { while(key != 'q') { if(key >= 0) { printf("Pressed: %c | ", key); - if(key == KEY_ROTATE_DECR || key == KEY_ROTATE_INCR) { - if(key == KEY_ROTATE_DECR) { + if(key == keyRotateDecr || key == keyRotateIncr) { + if(key == keyRotateDecr) { if(rotateRate > -ROTATE_RATE_MAX) rotateRate -= ROTATE_RATE_INC; - } else if(key == KEY_ROTATE_INCR) { + } else if(key == keyRotateIncr) { if(rotateRate < ROTATE_RATE_MAX) rotateRate += ROTATE_RATE_INC; } testFourPt = false; printf("Crop rotated rectangle, rate: %g degrees", rotateRate); - } else if(key == KEY_RESIZE_INC) { + } else if(key == keyResizeInc) { resizeFactor++; if(resizeFactor > RESIZE_FACTOR_MAX) { resizeFactor = 0; @@ -125,10 +127,10 @@ int main() { resizeY = RESIZE_MAX_H / resizeFactor; printf("Crop region resized to: %d x %d", resizeX, resizeY); } - } else if(key == KEY_WARP_TEST_CYCLE) { + } else if(key == keyWarpTestCycle) { resizeFactor = 0; // Disable resizing 
initially warpIdx = (warpIdx + 1) % warpList.size(); - printf("Warp 4-point transform, %s", warpList[warpIdx].description); + printf("Warp 4-point transform: %s", warpList[warpIdx].description); testFourPt = true; } else if(key == 'h') { printControls(); @@ -146,15 +148,14 @@ int main() { dai::RotatedRect rr = {{320, 240}, // center {640, 480}, //{400, 400}, // size angleDeg}; - bool normalized = false; - cfg.setCropRotatedRect(rr, normalized); + cfg.setCropRotatedRect(rr, false); } if(resizeFactor > 0) { cfg.setResize(resizeX, resizeY); } // cfg.setWarpBorderFillColor(255, 0, 0); // cfg.setWarpBorderReplicatePixels(); - manipInQueue->send(cfg); + qManipCfg->send(cfg); } for(const auto& q : frameQueues) { @@ -164,4 +165,5 @@ int main() { } key = cv::waitKey(1); } + return 0; } diff --git a/examples/src/21_mobilenet_device_side_decoding.cpp b/examples/src/21_mobilenet_device_side_decoding.cpp deleted file mode 100644 index 828d0a8a3..000000000 --- a/examples/src/21_mobilenet_device_side_decoding.cpp +++ /dev/null @@ -1,126 +0,0 @@ -#include -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -static const std::vector labelMap = {"background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", - "car", "cat", "chair", "cow", "diningtable", "dog", "horse", - "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; - -static bool syncNN = true; - -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; - - auto camRgb = p.create(); - auto xlinkOut = p.create(); - auto detectionNetwork = p.create(); - auto nnOut = p.create(); - - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); - - camRgb->setPreviewSize(300, 300); - camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - camRgb->setInterleaved(false); - camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - camRgb->setFps(40); - - // testing MobileNet DetectionNetwork - detectionNetwork->setConfidenceThreshold(0.5f); - detectionNetwork->setBlobPath(nnPath); - - // Link plugins CAM -> NN -> XLINK - camRgb->preview.link(detectionNetwork->input); - if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); - } else { - camRgb->preview.link(xlinkOut->input); - } - - detectionNetwork->out.link(nnOut->input); - - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - - // Connect to device with above created pipeline - dai::Device d(p); - // Start the pipeline - d.startPipeline(); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); - - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - cv::Mat frame = imgFrame->getCvFrame(); - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - 
for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; - } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); - } - - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); - - cv::imshow("preview", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } - - return 0; -} \ No newline at end of file diff --git a/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp b/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp index d56262234..7ba322f51 100644 --- a/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp +++ b/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp @@ -18,78 +18,103 @@ static const std::vector labelMap = { "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; -static bool syncNN = true; +static std::atomic syncNN{true}; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); - auto camRgb = p.create(); - auto xlinkOut = p.create(); - auto nnOut = p.create(); + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } - xlinkOut->setStreamName("preview"); - nnOut->setStreamName("detections"); + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutRgb->setStreamName("rgb"); + nnOut->setStreamName("nn"); + + // Properties camRgb->setPreviewSize(416, 416); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); camRgb->setInterleaved(false); camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); camRgb->setFps(40); - auto detectionNetwork = p.create(); - - // network specific!! 
+ // Network specific settings detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setNumClasses(80); detectionNetwork->setCoordinateSize(4); detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - detectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); detectionNetwork->setIouThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setNumInferenceThreads(2); + detectionNetwork->input.setBlocking(false); - // Link plugins CAM -> NN -> XLINK + // Linking camRgb->preview.link(detectionNetwork->input); if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); + detectionNetwork->passthrough.link(xoutRgb->input); } else { - camRgb->preview.link(xlinkOut->input); + camRgb->preview.link(xoutRgb->input); } detectionNetwork->out.link(nnOut->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); + device.startPipeline(); + + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("nn", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + + while(true) { + auto inRgb = qRgb->get(); + auto inDet = qDet->get(); + auto detections = inDet->detections; + cv::Mat frame = inRgb->getCvFrame(); counter++; auto currentTime = steady_clock::now(); @@ -100,40 +125,16 @@ int main(int argc, char** argv) { startTime = currentTime; } - cv::Mat frame = imgFrame->getCvFrame(); - - auto color = cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; 
- int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; - } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); - } - std::stringstream fpsStr; fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + + displayFrame("video", frame, detections); - cv::imshow("preview", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } \ No newline at end of file diff --git a/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp b/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp index c34976c01..8df8f47ef 100644 --- a/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp +++ b/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp @@ -23,78 +23,104 @@ static const std::vector labelMap = { "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; -static bool syncNN = true; +static std::atomic syncNN{true}; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); - auto camRgb = p.create(); - auto xlinkOut = p.create(); - auto nnOut = p.create(); + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } - xlinkOut->setStreamName("preview"); + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto xoutRgb = pipeline.create(); + auto nnOut = pipeline.create(); + + xoutRgb->setStreamName("rgb"); nnOut->setStreamName("detections"); + // Properties camRgb->setPreviewSize(416, 416); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); camRgb->setInterleaved(false); camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); camRgb->setFps(40); - auto detectionNetwork = p.create(); - - // network specific!! 
+ // Network specific settings detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setNumClasses(80); detectionNetwork->setCoordinateSize(4); - detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - detectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + detectionNetwork->setAnchors({10,14, 23,27, 37,58, 81,82, 135,169, 344,319}); + detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}},{"side13", {3, 4, 5}}}); detectionNetwork->setIouThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setNumInferenceThreads(2); + detectionNetwork->input.setBlocking(false); - // Link plugins CAM -> NN -> XLINK + // Linking camRgb->preview.link(detectionNetwork->input); if(syncNN) { - detectionNetwork->passthrough.link(xlinkOut->input); + detectionNetwork->passthrough.link(xoutRgb->input); } else { - camRgb->preview.link(xlinkOut->input); + camRgb->preview.link(xoutRgb->input); } detectionNetwork->out.link(nnOut->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); - - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); + device.startPipeline(); + + //Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto qRgb = device.getOutputQueue("rgb", 4, false); + auto qDet = device.getOutputQueue("detections", 4, false); + + // Add bounding boxes and text to the frame and show it to the user + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { + auto color = cv::Scalar(255, 0, 0); + // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height + for(auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + } + // Show the frame + cv::imshow(name, frame); + }; auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + + while(true) { + auto inRgb = qRgb->get(); + auto inDet = qDet->get(); + auto detections = inDet->detections; + cv::Mat frame = inRgb->getCvFrame(); counter++; auto currentTime = steady_clock::now(); @@ -105,40 +131,16 @@ int main(int argc, char** argv) { startTime = currentTime; } - cv::Mat frame = imgFrame->getCvFrame(); - - auto color 
= cv::Scalar(255, 255, 255); - auto dets = det->detections; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; - } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); - } - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + fpsStr << "NN fps: "<< std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + + displayFrame("video", frame, detections); - cv::imshow("preview", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} \ No newline at end of file +} diff --git a/examples/src/24_opencv_support.cpp b/examples/src/24_opencv_support.cpp index f042be393..a1d2500b3 100644 --- a/examples/src/24_opencv_support.cpp +++ b/examples/src/24_opencv_support.cpp @@ -7,53 +7,54 @@ // Include OpenCV #include -dai::Pipeline createCameraFullPipeline() { - dai::Pipeline p; - - auto colorCam = p.create(); - auto xlinkOut = p.create(); - auto xlinkOut2 = p.create(); - xlinkOut->setStreamName("video"); - xlinkOut2->setStreamName("preview"); - - colorCam->setPreviewSize(320, 180); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - - // Link plugins CAM -> XLINK - colorCam->video.link(xlinkOut->input); - colorCam->preview.link(xlinkOut2->input); - - return p; -} - int main() { using namespace std; using namespace std::chrono; // Create pipeline - dai::Pipeline p = createCameraFullPipeline(); + dai::Pipeline pipeline; + + // Define source and outputs + auto camRgb = pipeline.create(); + auto xoutVideo = pipeline.create(); + auto xoutPreview = pipeline.create(); + + xoutVideo->setStreamName("video"); + xoutPreview->setStreamName("preview"); + + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(true); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + + // Linking + camRgb->video.link(xoutVideo->input); + camRgb->preview.link(xoutPreview->input); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto video = d.getOutputQueue("video"); - auto preview = d.getOutputQueue("preview"); + auto video = device.getOutputQueue("video"); + auto preview = device.getOutputQueue("preview"); - while(1) { - // Retrieves video ImgFrame and converts a cv::Mat copy in BGR format (suitable for opencv usage) + while(true) { auto videoFrame = video->get(); + auto previewFrame = preview->get(); + + // Get BGR frame from NV12 encoded video frame to show with 
opencv cv::imshow("video", videoFrame->getCvFrame()); - // Retrieves preview ImgFrame and returns (without copying deepCopy = false) cv::Mat - auto previewFrame = preview->get(); + // Show 'preview' frame as is (already in correct format, no copy is made) cv::imshow("preview", previewFrame->getFrame()); - // Waits a bit and updates windows - if(cv::waitKey(1) == 'q') break; + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') + return 0; } return 0; } \ No newline at end of file diff --git a/examples/src/25_system_information.cpp b/examples/src/25_system_information.cpp index efae09c41..f773fefd0 100644 --- a/examples/src/25_system_information.cpp +++ b/examples/src/25_system_information.cpp @@ -1,62 +1,51 @@ - - #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -void printSystemInformation(dai::SystemInformation); +void printSystemInformation(dai::SystemInformation info) { + printf("Ddr used / total - %.2f / %.2f MiB\n", info.ddrMemoryUsage.used / (1024.0f * 1024.0f), info.ddrMemoryUsage.total / (1024.0f * 1024.0f)); + printf("Cmx used / total - %.2f / %.2f MiB\n", info.cmxMemoryUsage.used / (1024.0f * 1024.0f), info.cmxMemoryUsage.total / (1024.0f * 1024.0f)); + printf("LeonCss heap used / total - %.2f / %.2f MiB\n", + info.leonCssMemoryUsage.used / (1024.0f * 1024.0f), + info.leonCssMemoryUsage.total / (1024.0f * 1024.0f)); + printf("LeonMss heap used / total - %.2f / %.2f MiB\n", + info.leonMssMemoryUsage.used / (1024.0f * 1024.0f), + info.leonMssMemoryUsage.total / (1024.0f * 1024.0f)); + const auto& t = info.chipTemperature; + printf("Chip temperature - average: %.2f, css: %.2f, mss: %.2f, upa0: %.2f, upa1: %.2f\n", t.average, t.css, t.mss, t.upa, t.dss); + printf("Cpu usage - Leon OS: %.2f %%, Leon RT: %.2f %%\n", info.leonCssCpuUsage.average * 100, info.leonMssCpuUsage.average * 100); + printf("----------------------------------------\n"); +} int main() { using namespace std; + // Start defining a pipeline dai::Pipeline pipeline; + auto sysLog = pipeline.create(); auto xout = pipeline.create(); - // properties - sysLog->setRate(1.0f); // 1 hz updates xout->setStreamName("sysinfo"); - // links + // Properties + sysLog->setRate(1.0f); // 1 hz updates + + // Linking sysLog->out.link(xout->input); // Connect to device dai::Device device(pipeline); - - // Create 'sysinfo' queue - auto queue = device.getOutputQueue("sysinfo"); - - // Query device (before pipeline starts) - dai::MemoryInfo ddr = device.getDdrMemoryUsage(); - printf("Ddr used / total - %.2f / %.2f MiB\n", ddr.used / (1024.0f * 1024.0f), ddr.total / (1024.0f * 1024.0f)); - - dai::MemoryInfo cmx = device.getCmxMemoryUsage(); - printf("Cmx used / total - %.2f / %.2f MiB\n", cmx.used / (1024.0f * 1024.0f), cmx.total / (1024.0f * 1024.0f)); - // Start pipeline device.startPipeline(); - while(1) { - auto sysInfo = queue->get(); + // Output queue will be used to get the system info + auto qSysInfo = device.getOutputQueue("sysinfo", 4, false); + + while(true) { + auto sysInfo = qSysInfo->get(); printSystemInformation(*sysInfo); } } - -void printSystemInformation(dai::SystemInformation info) { - printf("Ddr used / total - %.2f / %.2f MiB\n", info.ddrMemoryUsage.used / (1024.0f * 1024.0f), info.ddrMemoryUsage.total / (1024.0f * 1024.0f)); - printf("Cmx used / total - %.2f / %.2f MiB\n", info.cmxMemoryUsage.used / (1024.0f * 1024.0f), info.cmxMemoryUsage.total / (1024.0f * 1024.0f)); - - printf("LeonCss heap used / total - %.2f / 
%.2f MiB\n", - info.leonCssMemoryUsage.used / (1024.0f * 1024.0f), - info.leonCssMemoryUsage.total / (1024.0f * 1024.0f)); - printf("LeonMss heap used / total - %.2f / %.2f MiB\n", - info.leonMssMemoryUsage.used / (1024.0f * 1024.0f), - info.leonMssMemoryUsage.total / (1024.0f * 1024.0f)); - const auto& t = info.chipTemperature; - printf("Chip temperature - average: %.2f, css: %.2f, mss: %.2f, upa0: %.2f, upa1: %.2f\n", t.average, t.css, t.mss, t.upa, t.dss); - printf("Cpu usage - Leon OS: %.2f %%, Leon RT: %.2f %%\n", info.leonCssCpuUsage.average * 100, info.leonMssCpuUsage.average * 100); -} \ No newline at end of file diff --git a/examples/src/26_1_spatial_mobilenet.cpp b/examples/src/26_1_spatial_mobilenet.cpp index a81cc47af..52b7f4c16 100644 --- a/examples/src/26_1_spatial_mobilenet.cpp +++ b/examples/src/26_1_spatial_mobilenet.cpp @@ -11,40 +11,53 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic syncNN{true}; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); - // create nodes - auto colorCam = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } - // create xlink connections - auto xoutRgb = p.create(); - auto xoutNN = p.create(); - auto xoutBoundingBoxDepthMapping = p.create(); - auto xoutDepth = p.create(); + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); - xoutRgb->setStreamName("preview"); + auto xoutRgb = pipeline.create(); + auto xoutNN = pipeline.create(); + auto xoutBoundingBoxDepthMapping = pipeline.create(); + auto xoutDepth = pipeline.create(); + + xoutRgb->setStreamName("rgb"); xoutNN->setStreamName("detections"); xoutBoundingBoxDepthMapping->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - /// setting node configs + // Setting node configs stereo->setOutputDepth(true); stereo->setConfidenceThreshold(255); @@ -55,16 +68,15 @@ dai::Pipeline createPipeline(std::string nnPath) { spatialDetectionNetwork->setDepthLowerThreshold(100); 
spatialDetectionNetwork->setDepthUpperThreshold(5000); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); + camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) spatialDetectionNetwork->passthrough.link(xoutRgb->input); else - colorCam->preview.link(xoutRgb->input); + camRgb->preview.link(xoutRgb->input); spatialDetectionNetwork->out.link(xoutNN->input); spatialDetectionNetwork->boundingBoxMapping.link(xoutBoundingBoxDepthMapping->input); @@ -72,46 +84,27 @@ dai::Pipeline createPipeline(std::string nnPath) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto xoutBoundingBoxDepthMapping = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + auto previewQueue = device.getOutputQueue("rgb", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto xoutBoundingBoxDepthMappingQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + while(true) { + auto inPreview = previewQueue->get(); + auto inDet = detectionNNQueue->get(); auto depth = depthQueue->get(); - auto dets = det->detections; + auto detections = inDet->detections; cv::Mat depthFrame = depth->getFrame(); cv::Mat depthFrameColor; @@ -119,8 +112,8 @@ int main(int argc, char** argv) { cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = xoutBoundingBoxDepthMapping->get(); + if(!detections.empty()) { + auto boundingBoxMapping = xoutBoundingBoxDepthMappingQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -145,32 +138,32 @@ int main(int argc, char** argv) { startTime = currentTime; } - cv::Mat frame = imgFrame->getCvFrame(); + cv::Mat frame = inPreview->getCvFrame(); - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; + for(const auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } 
cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); @@ -178,15 +171,15 @@ int main(int argc, char** argv) { std::stringstream fpsStr; fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + cv::putText(frame, fpsStr.str(), cv::Point(2, inPreview->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("depth", depthFrameColor); - cv::imshow("preview", frame); + cv::imshow("rgb", frame); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} \ No newline at end of file +} From f219a79042cb0d78a58189fd5865912a3440f5a0 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Fri, 30 Apr 2021 01:44:14 +0300 Subject: [PATCH 20/56] Replace RawImgFrame with ImgFrame --- tests/src/image_manip_node_test.cpp | 2 +- tests/src/neural_network_test.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/src/image_manip_node_test.cpp b/tests/src/image_manip_node_test.cpp index cba103ea4..d80742474 100644 --- a/tests/src/image_manip_node_test.cpp +++ b/tests/src/image_manip_node_test.cpp @@ -53,7 +53,7 @@ int main() { inFrame.getData().resize(originalWidth * originalHeight * 3); inFrame.setWidth(originalWidth); inFrame.setHeight(originalHeight); - inFrame.setType(dai::RawImgFrame::Type::RGB888p); + inFrame.setType(dai::ImgFrame::Type::RGB888p); // Send the frame in->send(inFrame); diff --git a/tests/src/neural_network_test.cpp b/tests/src/neural_network_test.cpp index fa0c0cd69..726baefc1 100644 --- a/tests/src/neural_network_test.cpp +++ b/tests/src/neural_network_test.cpp @@ -72,7 +72,7 @@ TEST_CASE("Neural network node data checks") { dai::ImgFrame frame; frame.setWidth(MOBILENET_WIDTH); frame.setHeight(MOBILENET_HEIGHT); - frame.setType(dai::RawImgFrame::Type::BGR888p); + frame.setType(dai::ImgFrame::Type::BGR888p); frame.setData(std::vector(MOBILENET_DATA_SIZE + i * 1024 * 10)); int msgIndex = 0; From 0e2e1f45c0cf5ab50af0c040a1485c7f29f14602 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Fri, 30 Apr 2021 02:46:19 +0300 Subject: [PATCH 21/56] Resolved some warnings --- examples/CMakeLists.txt | 10 ++++++- examples/src/03_depth_preview.cpp | 1 - examples/src/10_mono_depth_mobilenetssd.cpp | 3 +- .../12_rgb_encoding_mono_mobilenet_depth.cpp | 29 
+++++++++---------- examples/src/14_3_depth_crop_control.cpp | 1 - examples/src/26_1_spatial_mobilenet.cpp | 1 - examples/src/26_2_spatial_mobilenet_mono.cpp | 2 +- examples/src/30_2_stereo_depth_video.cpp | 2 +- 8 files changed, 26 insertions(+), 23 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 32298ec16..3207765a2 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -79,6 +79,14 @@ hunter_private_data( LOCATION tiny_yolo_v4_blob ) +# NeuralNetwork node, mobilenet example, 5 shaves +hunter_private_data( + URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob" + SHA1 "d715f85e474609cf3f696d7a2e3750804ed6c726" + FILE "mobilenet-ssd_openvino_2021.2_5shave.blob" + LOCATION mobilenet_5shaves_blob +) + # NeuralNetwork node, mobilenet example, 8 shaves hunter_private_data( URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob" @@ -135,7 +143,7 @@ dai_add_example(14_2_mono_camera_control src/14_2_mono_camera_control.cpp) dai_add_example(14_3_depth_crop_control src/14_3_depth_crop_control.cpp) dai_add_example(15_rgb_mobilenet_4k src/15_rgb_mobilenet_4k.cpp) -target_compile_definitions(15_rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_blob}") +target_compile_definitions(15_rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_5shaves_blob}") dai_add_example(18_rgb_encoding_mobilenet src/18_rgb_encoding_mobilenet.cpp) target_compile_definitions(18_rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/03_depth_preview.cpp b/examples/src/03_depth_preview.cpp index ccef0dd93..9cacfdf70 100644 --- a/examples/src/03_depth_preview.cpp +++ b/examples/src/03_depth_preview.cpp @@ -30,7 +30,6 @@ int main() { // Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth->setConfidenceThreshold(200); - depth->setOutputDepth(false); // Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) depth->setMedianFilter(dai::StereoDepthProperties::MedianFilter::KERNEL_7x7); depth->setLeftRightCheck(lr_check); diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/10_mono_depth_mobilenetssd.cpp index 2d168cdfd..85d314cb5 100644 --- a/examples/src/10_mono_depth_mobilenetssd.cpp +++ b/examples/src/10_mono_depth_mobilenetssd.cpp @@ -51,7 +51,6 @@ int main(int argc, char** argv) { monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); // Produce the depth map (using disparity output as it's easier to visualize depth this way) - stereo->setOutputRectified(true); // The rectified streams are horizontally mirrored by default stereo->setConfidenceThreshold(255); stereo->setRectifyEdgeFillColor(0); // Black, to better see the cutout from rectification (black stripe on the edges) // Convert the grayscale frame into the nn-acceptable form @@ -85,7 +84,7 @@ int main(int argc, char** argv) { // Add bounding boxes and text to the frame and show it to the user auto show = [](std::string name, cv::Mat frame, std::vector& detections) { - auto color = cv::Scalar(255, 0, 0); + auto color = cv::Scalar(255, 192, 203); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { int x1 = detection.xmin * frame.cols; diff --git 
a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp index 5c26de6f4..31fff649b 100644 --- a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp @@ -34,7 +34,7 @@ int main(int argc, char** argv) { auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); auto monoLeft = pipeline.create(); - auto monoRight = pipeline.create(); + auto camRight = pipeline.create(); auto depth = pipeline.create(); auto nn = pipeline.create(); auto manip = pipeline.create(); @@ -55,13 +55,12 @@ int main(int argc, char** argv) { camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); - monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); // Note: the rectified streams are horizontally mirrored by default - depth->setOutputRectified(true); depth->setConfidenceThreshold(255); depth->setRectifyMirrorFrame(false); depth->setRectifyEdgeFillColor(0); // Black, to better see the cutout @@ -78,8 +77,8 @@ int main(int argc, char** argv) { // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); - monoRight->out.link(xoutRight->input); - monoRight->out.link(depth->right); + camRight->out.link(xoutRight->input); + camRight->out.link(depth->right); monoLeft->out.link(depth->left); depth->disparity.link(disparityOut->input); depth->rectifiedRight.link(manip->inputImage); @@ -127,13 +126,13 @@ int main(int argc, char** argv) { frameDisparity.convertTo(frameDisparity, CV_8UC1, disparity_multiplier); cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); - int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; + int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; auto color = cv::Scalar(255, 0, 0); for(auto& detection : detections) { - int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * monoRight->getResolutionHeight(); - int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * monoRight->getResolutionHeight(); + int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * camRight->getResolutionHeight(); + int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * camRight->getResolutionHeight(); int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); @@ -150,10 +149,10 @@ int main(int argc, char** argv) { cv::imshow("right", frame); for(auto& detection : detections) { - int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * monoRight->getResolutionHeight(); - int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * monoRight->getResolutionHeight(); + int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * 
camRight->getResolutionHeight(); + int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * camRight->getResolutionHeight(); int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); diff --git a/examples/src/14_3_depth_crop_control.cpp b/examples/src/14_3_depth_crop_control.cpp index 31e1d94b4..265140ebc 100644 --- a/examples/src/14_3_depth_crop_control.cpp +++ b/examples/src/14_3_depth_crop_control.cpp @@ -42,7 +42,6 @@ int main() { manip->initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); manip->setMaxOutputFrameSize(monoRight->getResolutionHeight() * monoRight->getResolutionWidth() * 3); stereo->setConfidenceThreshold(200); - stereo->setOutputDepth(true); // Linking configIn->out.link(manip->inputConfig); diff --git a/examples/src/26_1_spatial_mobilenet.cpp b/examples/src/26_1_spatial_mobilenet.cpp index 52b7f4c16..7af031953 100644 --- a/examples/src/26_1_spatial_mobilenet.cpp +++ b/examples/src/26_1_spatial_mobilenet.cpp @@ -58,7 +58,6 @@ int main(int argc, char** argv) { monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // Setting node configs - stereo->setOutputDepth(true); stereo->setConfidenceThreshold(255); spatialDetectionNetwork->setBlobPath(nnPath); diff --git a/examples/src/26_2_spatial_mobilenet_mono.cpp b/examples/src/26_2_spatial_mobilenet_mono.cpp index 152e3f83c..db1c67f86 100644 --- a/examples/src/26_2_spatial_mobilenet_mono.cpp +++ b/examples/src/26_2_spatial_mobilenet_mono.cpp @@ -32,7 +32,7 @@ dai::Pipeline createPipeline(std::string nnPath) { imageManip->initialConfig.setResize(300, 300); // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) - imageManip->initialConfig.setFrameType(dai::RawImgFrame::Type::BGR888p); + imageManip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); auto monoLeft = p.create(); auto monoRight = p.create(); diff --git a/examples/src/30_2_stereo_depth_video.cpp b/examples/src/30_2_stereo_depth_video.cpp index 262cef977..ee0a43448 100644 --- a/examples/src/30_2_stereo_depth_video.cpp +++ b/examples/src/30_2_stereo_depth_video.cpp @@ -48,7 +48,7 @@ int main() { bool outputRectified = true; bool lrcheck = true; bool extended = false; - bool subpixel = true; + bool subpixel = false; int maxDisp = 96; if(extended) maxDisp *= 2; From e8e0c715ba5dbf2393e09a9327bc9d66cc9e8550 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 3 May 2021 00:33:07 +0300 Subject: [PATCH 22/56] Remove numbers and some optimization --- examples/CMakeLists.txt | 149 +++++++++-------- examples/src/28_1_rgb_camera_video.cpp | 65 -------- examples/src/28_2_mono_camera_video.cpp | 59 ------- .../src/camera_mobilenet_sync_example.cpp | 16 +- ...a_control.cpp => color_camera_control.cpp} | 0 ...rop_control.cpp => depth_crop_control.cpp} | 0 ...03_depth_preview.cpp => depth_preview.cpp} | 0 ...queue_event.cpp => device_queue_event.cpp} | 0 ...g_max_limit.cpp => encoding_max_limit.cpp} | 8 +- examples/src/image_manip_example.cpp | 14 +- ...ge_manip_warp.cpp => image_manip_warp.cpp} | 0 examples/src/mjpeg_encoding_example.cpp | 14 +- ...ra_control.cpp => mono_camera_control.cpp} | 0 examples/src/mono_camera_video.cpp | 48 ++++++ ...netssd.cpp => mono_depth_mobilenetssd.cpp} | 0 ...ver.cpp => mono_full_resolution_saver.cpp} | 0 ..._mono_mobilenet.cpp => mono_mobilenet.cpp} | 0 .../{02_mono_preview.cpp => mono_preview.cpp} | 0 ..._object_tracker.cpp => object_tracker.cpp} | 113 +++++++------ ..._opencv_support.cpp => 
opencv_support.cpp} | 0 examples/src/rgb_camera_video.cpp | 50 ++++++ examples/src/rgb_depth_aligned_example.cpp | 54 +++--- .../{04_rgb_encoding.cpp => rgb_encoding.cpp} | 0 ...bilenet.cpp => rgb_encoding_mobilenet.cpp} | 0 ...et.cpp => rgb_encoding_mono_mobilenet.cpp} | 0 ... => rgb_encoding_mono_mobilenet_depth.cpp} | 0 ...aver.cpp => rgb_full_resolution_saver.cpp} | 0 ...08_rgb_mobilenet.cpp => rgb_mobilenet.cpp} | 0 ..._mobilenet_4k.cpp => rgb_mobilenet_4k.cpp} | 0 ...ono_encoding.cpp => rgb_mono_encoding.cpp} | 0 .../{01_rgb_preview.cpp => rgb_preview.cpp} | 0 ...or.cpp => spatial_location_calculator.cpp} | 59 +++---- ...al_mobilenet.cpp => spatial_mobilenet.cpp} | 0 ...et_mono.cpp => spatial_mobilenet_mono.cpp} | 149 ++++++++--------- ...tracker.cpp => spatial_object_tracker.cpp} | 14 +- ...al_tiny_yolo.cpp => spatial_tiny_yolo.cpp} | 154 +++++++++--------- ...depth_video.cpp => stereo_depth_video.cpp} | 2 - ...information.cpp => system_information.cpp} | 0 ... => tiny_yolo_v3_device_side_decoding.cpp} | 0 ... => tiny_yolo_v4_device_side_decoding.cpp} | 0 ...ideo_mobilenet.cpp => video_mobilenet.cpp} | 0 41 files changed, 465 insertions(+), 503 deletions(-) delete mode 100644 examples/src/28_1_rgb_camera_video.cpp delete mode 100644 examples/src/28_2_mono_camera_video.cpp rename examples/src/{14_1_color_camera_control.cpp => color_camera_control.cpp} (100%) rename examples/src/{14_3_depth_crop_control.cpp => depth_crop_control.cpp} (100%) rename examples/src/{03_depth_preview.cpp => depth_preview.cpp} (100%) rename examples/src/{16_device_queue_event.cpp => device_queue_event.cpp} (100%) rename examples/src/{13_encoding_max_limit.cpp => encoding_max_limit.cpp} (93%) rename examples/src/{20_image_manip_warp.cpp => image_manip_warp.cpp} (100%) rename examples/src/{14_2_mono_camera_control.cpp => mono_camera_control.cpp} (100%) create mode 100644 examples/src/mono_camera_video.cpp rename examples/src/{10_mono_depth_mobilenetssd.cpp => mono_depth_mobilenetssd.cpp} (100%) rename examples/src/{07_mono_full_resolution_saver.cpp => mono_full_resolution_saver.cpp} (100%) rename examples/src/{09_mono_mobilenet.cpp => mono_mobilenet.cpp} (100%) rename examples/src/{02_mono_preview.cpp => mono_preview.cpp} (100%) rename examples/src/{29_1_object_tracker.cpp => object_tracker.cpp} (60%) rename examples/src/{24_opencv_support.cpp => opencv_support.cpp} (100%) create mode 100644 examples/src/rgb_camera_video.cpp rename examples/src/{04_rgb_encoding.cpp => rgb_encoding.cpp} (100%) rename examples/src/{18_rgb_encoding_mobilenet.cpp => rgb_encoding_mobilenet.cpp} (100%) rename examples/src/{11_rgb_encoding_mono_mobilenet.cpp => rgb_encoding_mono_mobilenet.cpp} (100%) rename examples/src/{12_rgb_encoding_mono_mobilenet_depth.cpp => rgb_encoding_mono_mobilenet_depth.cpp} (100%) rename examples/src/{06_rgb_full_resolution_saver.cpp => rgb_full_resolution_saver.cpp} (100%) rename examples/src/{08_rgb_mobilenet.cpp => rgb_mobilenet.cpp} (100%) rename examples/src/{15_rgb_mobilenet_4k.cpp => rgb_mobilenet_4k.cpp} (100%) rename examples/src/{05_rgb_mono_encoding.cpp => rgb_mono_encoding.cpp} (100%) rename examples/src/{01_rgb_preview.cpp => rgb_preview.cpp} (100%) rename examples/src/{27_spatial_location_calculator.cpp => spatial_location_calculator.cpp} (85%) rename examples/src/{26_1_spatial_mobilenet.cpp => spatial_mobilenet.cpp} (100%) rename examples/src/{26_2_spatial_mobilenet_mono.cpp => spatial_mobilenet_mono.cpp} (65%) rename examples/src/{29_2_spatial_object_tracker.cpp => 
spatial_object_tracker.cpp} (93%) rename examples/src/{26_3_spatial_tiny_yolo.cpp => spatial_tiny_yolo.cpp} (72%) rename examples/src/{30_2_stereo_depth_video.cpp => stereo_depth_video.cpp} (99%) rename examples/src/{25_system_information.cpp => system_information.cpp} (100%) rename examples/src/{22_1_tiny_yolo_v3_device_side_decoding.cpp => tiny_yolo_v3_device_side_decoding.cpp} (100%) rename examples/src/{22_2_tiny_yolo_v4_device_side_decoding.cpp => tiny_yolo_v4_device_side_decoding.cpp} (100%) rename examples/src/{17_video_mobilenet.cpp => video_mobilenet.cpp} (100%) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 3207765a2..03ec1440c 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -106,125 +106,124 @@ hunter_private_data( ## message(STATUS "Location of test1.data: ${test1_data}") # Color camera preview output example -dai_add_example(01_rgb_preview src/01_rgb_preview.cpp) +dai_add_example(rgb_preview src/rgb_preview.cpp) # Mono camera preview output example -dai_add_example(02_mono_preview src/02_mono_preview.cpp) +dai_add_example(mono_preview src/mono_preview.cpp) # Depth preview output example -dai_add_example(03_depth_preview src/03_depth_preview.cpp) +dai_add_example(depth_preview src/depth_preview.cpp) # rgb encoding -dai_add_example(04_rgb_encoding src/04_rgb_encoding.cpp) +dai_add_example(rgb_encoding src/rgb_encoding.cpp) # rgb+mono encoding -dai_add_example(05_rgb_mono_encoding src/05_rgb_mono_encoding.cpp) +dai_add_example(rgb_mono_encoding src/rgb_mono_encoding.cpp) -dai_add_example(06_rgb_full_resolution_saver src/06_rgb_full_resolution_saver.cpp) +dai_add_example(rgb_full_resolution_saver src/rgb_full_resolution_saver.cpp) -dai_add_example(07_mono_full_resolution_saver src/07_mono_full_resolution_saver.cpp) +dai_add_example(mono_full_resolution_saver src/mono_full_resolution_saver.cpp) -dai_add_example(09_mono_mobilenet src/09_mono_mobilenet.cpp) -target_compile_definitions(09_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(rgb_mobilenet src/rgb_mobilenet.cpp) +target_compile_definitions(rgb_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(10_mono_depth_mobilenetssd src/10_mono_depth_mobilenetssd.cpp) -target_compile_definitions(10_mono_depth_mobilenetssd PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(mono_mobilenet src/mono_mobilenet.cpp) +target_compile_definitions(mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(11_rgb_encoding_mono_mobilenet src/11_rgb_encoding_mono_mobilenet.cpp) -target_compile_definitions(11_rgb_encoding_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(mono_depth_mobilenetssd src/mono_depth_mobilenetssd.cpp) +target_compile_definitions(mono_depth_mobilenetssd PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(12_rgb_encoding_mono_mobilenet_depth src/12_rgb_encoding_mono_mobilenet_depth.cpp) -target_compile_definitions(12_rgb_encoding_mono_mobilenet_depth PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(rgb_encoding_mono_mobilenet src/rgb_encoding_mono_mobilenet.cpp) +target_compile_definitions(rgb_encoding_mono_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(13_encoding_max_limit src/13_encoding_max_limit.cpp) +dai_add_example(rgb_encoding_mono_mobilenet_depth src/rgb_encoding_mono_mobilenet_depth.cpp) +target_compile_definitions(rgb_encoding_mono_mobilenet_depth PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(14_2_mono_camera_control src/14_2_mono_camera_control.cpp) - 
-dai_add_example(14_3_depth_crop_control src/14_3_depth_crop_control.cpp) - -dai_add_example(15_rgb_mobilenet_4k src/15_rgb_mobilenet_4k.cpp) -target_compile_definitions(15_rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_5shaves_blob}") - -dai_add_example(18_rgb_encoding_mobilenet src/18_rgb_encoding_mobilenet.cpp) -target_compile_definitions(18_rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(encoding_max_limit src/encoding_max_limit.cpp) +# Color Camera config example +dai_add_example(color_camera_control src/color_camera_control.cpp) -# Color camera video output example -dai_add_example(28_1_rgb_camera_video src/28_1_rgb_camera_video.cpp) +dai_add_example(mono_camera_control src/mono_camera_control.cpp) -# Mono camera video output example -dai_add_example(28_2_mono_camera_video src/28_2_mono_camera_video.cpp) +dai_add_example(depth_crop_control src/depth_crop_control.cpp) -dai_add_example(08_rgb_mobilenet src/08_rgb_mobilenet.cpp) -target_compile_definitions(08_rgb_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(rgb_mobilenet_4k src/rgb_mobilenet_4k.cpp) +target_compile_definitions(rgb_mobilenet_4k PRIVATE BLOB_PATH="${mobilenet_5shaves_blob}") -# NeuralNetwork node, webcam input (from host) -dai_add_example(webcam_mobilenet src/webcam_mobilenet_example.cpp) -target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}") +# Device getQueueEvent example +dai_add_example(device_queue_event src/device_queue_event.cpp) -# MJPEG encoding -dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) +dai_add_example(video_mobilenet src/video_mobilenet.cpp) +target_compile_definitions(video_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}" VIDEO_PATH="${construction_vest}") -# Stereo Depth example -dai_add_example(30_2_stereo_depth_video src/30_2_stereo_depth_video.cpp) +dai_add_example(rgb_encoding_mobilenet src/rgb_encoding_mobilenet.cpp) +target_compile_definitions(rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") # Image Manip node examples dai_add_example(image_manip src/image_manip_example.cpp) -dai_add_example(20_image_manip_warp src/20_image_manip_warp.cpp) -# Color Camera config example -dai_add_example(14_1_color_camera_control src/14_1_color_camera_control.cpp) +dai_add_example(image_manip_warp src/image_manip_warp.cpp) -# System information example -dai_add_example(25_system_information src/25_system_information.cpp) +# Device side decoding example for tiny-yolo-v3 +dai_add_example(tiny_yolo_v3_device_side_decoding src/tiny_yolo_v3_device_side_decoding.cpp) +target_compile_definitions(tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") -# Device getQueueEvent example -dai_add_example(16_device_queue_event src/16_device_queue_event.cpp) +# Device side decoding example for tiny-yolo-v4 +dai_add_example(tiny_yolo_v4_device_side_decoding src/tiny_yolo_v4_device_side_decoding.cpp) +target_compile_definitions(tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") # OpenCV support example -dai_add_example(24_opencv_support src/24_opencv_support.cpp) +dai_add_example(opencv_support src/opencv_support.cpp) -# RGB-depth alignment example -dai_add_example(rgb_depth_aligned src/rgb_depth_aligned_example.cpp) +# System information example +dai_add_example(system_information src/system_information.cpp) # Device side decoding example for mobilenet-ssd with 3d coordinates on RGB camera -dai_add_example(26_1_spatial_mobilenet src/26_1_spatial_mobilenet.cpp) 
-target_compile_definitions(26_1_spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(spatial_mobilenet src/spatial_mobilenet.cpp) +target_compile_definitions(spatial_mobilenet PRIVATE BLOB_PATH="${mobilenet_blob}") # Device side decoding example for mobilenet-ssd with 3d coordinates on right camera -dai_add_example(26_2_spatial_mobilenet_mono src/26_2_spatial_mobilenet_mono.cpp) -target_compile_definitions(26_2_spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(spatial_mobilenet_mono src/spatial_mobilenet_mono.cpp) +target_compile_definitions(spatial_mobilenet_mono PRIVATE BLOB_PATH="${mobilenet_blob}") -# Device side decoding example for tiny-yolo-v3 -dai_add_example(22_1_tiny_yolo_v3_device_side_decoding src/22_1_tiny_yolo_v3_device_side_decoding.cpp) -target_compile_definitions(22_1_tiny_yolo_v3_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") - -# Device side decoding example for tiny-yolo-v4 -dai_add_example(22_2_tiny_yolo_v4_device_side_decoding src/22_2_tiny_yolo_v4_device_side_decoding.cpp) -target_compile_definitions(22_2_tiny_yolo_v4_device_side_decoding PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") +# Device side decoding example for tiny-yolo-v3 with 3d coordinates on RGB camera +dai_add_example(spatial_tiny_yolo_v3 src/spatial_tiny_yolo.cpp) +target_compile_definitions(spatial_tiny_yolo_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") -# Sync example between NN and camera frames -dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) -target_compile_definitions(camera_mobilenet_sync PRIVATE BLOB_PATH="${mobilenet_blob}") +# Device side decoding example for tiny-yolo-v4 with 3d coordinates on RGB camera +dai_add_example(spatial_tiny_yolo_v4 src/spatial_tiny_yolo.cpp) +target_compile_definitions(spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") -dai_add_example(27_spatial_location_calculator src/27_spatial_location_calculator.cpp) +dai_add_example(spatial_location_calculator src/spatial_location_calculator.cpp) -# Device side decoding example for tiny-yolo-v3 with 3d coordinates on RGB camera -dai_add_example(26_3_spatial_tiny_yolo_v3 src/26_3_spatial_tiny_yolo.cpp) -target_compile_definitions(26_3_spatial_tiny_yolo_v3 PRIVATE BLOB_PATH="${tiny_yolo_v3_blob}") +# Color camera video output example +dai_add_example(rgb_camera_video src/rgb_camera_video.cpp) -# Device side decoding example for tiny-yolo-v4 with 3d coordinates on RGB camera -dai_add_example(26_4_spatial_tiny_yolo_v4 src/26_3_spatial_tiny_yolo.cpp) -target_compile_definitions(26_4_spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v4_blob}") +# Mono camera video output example +dai_add_example(mono_camera_video src/mono_camera_video.cpp) # Object tracker example on mobilenet SSD output -dai_add_example(29_1_object_tracker src/29_1_object_tracker.cpp) -target_compile_definitions(29_1_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(object_tracker src/object_tracker.cpp) +target_compile_definitions(object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") # Spatial Object tracker example on mobilenet SSD output -dai_add_example(29_2_spatial_object_tracker src/29_2_spatial_object_tracker.cpp) -target_compile_definitions(29_2_spatial_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") +dai_add_example(spatial_object_tracker src/spatial_object_tracker.cpp) +target_compile_definitions(spatial_object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") -dai_add_example(17_video_mobilenet src/17_video_mobilenet.cpp) 
-target_compile_definitions(17_video_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}" VIDEO_PATH="${construction_vest}") +# Stereo Depth example +dai_add_example(stereo_depth_video src/stereo_depth_video.cpp) +# NeuralNetwork node, webcam input (from host) +dai_add_example(webcam_mobilenet src/webcam_mobilenet_example.cpp) +target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shaves_blob}") + +# MJPEG encoding +dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) + +# RGB-depth alignment example +dai_add_example(rgb_depth_aligned src/rgb_depth_aligned_example.cpp) + +# Sync example between NN and camera frames +dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) +target_compile_definitions(camera_mobilenet_sync PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/28_1_rgb_camera_video.cpp b/examples/src/28_1_rgb_camera_video.cpp deleted file mode 100644 index cecb6a6fb..000000000 --- a/examples/src/28_1_rgb_camera_video.cpp +++ /dev/null @@ -1,65 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -dai::Pipeline createPipeline() { - dai::Pipeline p; - - auto colorCam = p.create(); - - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(true); - - auto xlinkOut = p.create(); - xlinkOut->setStreamName("video"); - // Link plugins CAM -> XLINK - colorCam->video.link(xlinkOut->input); - - return p; -} - -int main() { - using namespace std; - using namespace std::chrono; - - // Create pipeline - dai::Pipeline p = createPipeline(); - - // Connect to device with above created pipeline - dai::Device d(p); - // Start the pipeline - d.startPipeline(); - - cv::Mat frame; - auto video = d.getOutputQueue("video"); - - while(1) { - auto imgFrame = video->get(); - if(imgFrame) { - auto dur = steady_clock::now() - imgFrame->getTimestamp(); - - printf("Frame - w: %d, h: %d, latency: %ldms\n", imgFrame->getWidth(), imgFrame->getHeight(), duration_cast(dur).count()); - - frame = cv::Mat(imgFrame->getHeight() * 3 / 2, imgFrame->getWidth(), CV_8UC1, imgFrame->getData().data()); - - cv::Mat rgb(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC3); - - cv::cvtColor(frame, rgb, cv::COLOR_YUV2BGR_NV12); - - cv::imshow("video", rgb); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; - } - } - return 0; -} \ No newline at end of file diff --git a/examples/src/28_2_mono_camera_video.cpp b/examples/src/28_2_mono_camera_video.cpp deleted file mode 100644 index 9f2a7e4f9..000000000 --- a/examples/src/28_2_mono_camera_video.cpp +++ /dev/null @@ -1,59 +0,0 @@ - -#include -#include - -#include "utility.hpp" - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -dai::Pipeline createMonoPipeline() { - dai::Pipeline p; - - auto monoCam = p.create(); - auto xlinkOut = p.create(); - xlinkOut->setStreamName("mono"); - - // Set camera socket - monoCam->setBoardSocket(dai::CameraBoardSocket::RIGHT); - - // Link plugins CAM -> XLINK - monoCam->out.link(xlinkOut->input); - - return p; -} - -int main() { - using namespace std; - using namespace std::chrono; - - // Create pipeline - dai::Pipeline p = createMonoPipeline(); - - // Connect to device with above created pipeline - dai::Device d(p); - // Start the pipeline - 
d.startPipeline(); - - cv::Mat frame; - auto monoQueue = d.getOutputQueue("mono"); - - while(1) { - auto imgFrame = monoQueue->get(); - if(imgFrame) { - int latencyMs = duration_cast(steady_clock::now() - imgFrame->getTimestamp()).count(); - printf("Frame - w: %d, h: %d, latency %dms\n", imgFrame->getWidth(), imgFrame->getHeight(), latencyMs); - - frame = cv::Mat(imgFrame->getHeight(), imgFrame->getWidth(), CV_8UC1, imgFrame->getData().data()); - - cv::imshow("video", frame); - int key = cv::waitKey(1); - if(key == 'q') { - return 0; - } - } else { - std::cout << "Not ImgFrame" << std::endl; - } - } - return 0; -} \ No newline at end of file diff --git a/examples/src/camera_mobilenet_sync_example.cpp b/examples/src/camera_mobilenet_sync_example.cpp index a8039b59a..de52df2d3 100644 --- a/examples/src/camera_mobilenet_sync_example.cpp +++ b/examples/src/camera_mobilenet_sync_example.cpp @@ -23,7 +23,7 @@ int main(int argc, char** argv) { dai::Pipeline pipeline; - auto colorCam = pipeline.create(); + auto camRgb = pipeline.create(); auto nn = pipeline.create(); auto camOut = pipeline.create(); auto passthroughMeta = pipeline.create(); @@ -35,11 +35,11 @@ int main(int argc, char** argv) { resultOut->setStreamName("resultOut"); // ColorCamera options - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(CAMERA_FPS); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(CAMERA_FPS); // NN input options nn->input.setBlocking(false); @@ -49,8 +49,8 @@ int main(int argc, char** argv) { nn->setNumInferenceThreads(2); // Link nodes CAM -> XLINK - colorCam->preview.link(nn->input); - colorCam->preview.link(camOut->input); + camRgb->preview.link(nn->input); + camRgb->preview.link(camOut->input); nn->passthrough.link(passthroughMeta->input); nn->out.link(resultOut->input); diff --git a/examples/src/14_1_color_camera_control.cpp b/examples/src/color_camera_control.cpp similarity index 100% rename from examples/src/14_1_color_camera_control.cpp rename to examples/src/color_camera_control.cpp diff --git a/examples/src/14_3_depth_crop_control.cpp b/examples/src/depth_crop_control.cpp similarity index 100% rename from examples/src/14_3_depth_crop_control.cpp rename to examples/src/depth_crop_control.cpp diff --git a/examples/src/03_depth_preview.cpp b/examples/src/depth_preview.cpp similarity index 100% rename from examples/src/03_depth_preview.cpp rename to examples/src/depth_preview.cpp diff --git a/examples/src/16_device_queue_event.cpp b/examples/src/device_queue_event.cpp similarity index 100% rename from examples/src/16_device_queue_event.cpp rename to examples/src/device_queue_event.cpp diff --git a/examples/src/13_encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp similarity index 93% rename from examples/src/13_encoding_max_limit.cpp rename to examples/src/encoding_max_limit.cpp index 94f34f1d3..f5e15d2a0 100644 --- a/examples/src/13_encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -19,7 +19,7 @@ int main(int argc, char** argv) { dai::Pipeline p; // Define a source - color camera and 2 mono camera - auto colorCam = p.create(); + auto camRgb = p.create(); auto monoCam = p.create(); auto monoCam2 = 
p.create(); auto ve1 = p.create(); @@ -35,8 +35,8 @@ int main(int argc, char** argv) { ve3Out->setStreamName("ve3Out"); // Properties - colorCam->setBoardSocket(dai::CameraBoardSocket::RGB); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); @@ -47,7 +47,7 @@ int main(int argc, char** argv) { // Create outputs monoCam->out.link(ve1->input); - colorCam->video.link(ve2->input); + camRgb->video.link(ve2->input); monoCam2->out.link(ve3->input); ve1->bitstream.link(ve1Out->input); diff --git a/examples/src/image_manip_example.cpp b/examples/src/image_manip_example.cpp index cd562567c..88b0468a5 100644 --- a/examples/src/image_manip_example.cpp +++ b/examples/src/image_manip_example.cpp @@ -11,7 +11,7 @@ int main() { dai::Pipeline pipeline; - auto colorCam = pipeline.create(); + auto camRgb = pipeline.create(); auto imageManip = pipeline.create(); auto imageManip2 = pipeline.create(); auto camOut = pipeline.create(); @@ -24,10 +24,10 @@ int main() { manipOut2->setStreamName("manip2"); manip2In->setStreamName("manip2In"); - colorCam->setPreviewSize(304, 304); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setPreviewSize(304, 304); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); // Create a center crop image manipulation imageManip->initialConfig.setCenterCrop(0.7f); @@ -38,10 +38,10 @@ int main() { imageManip2->setWaitForConfigInput(true); // Link nodes CAM -> XLINK - colorCam->preview.link(camOut->input); + camRgb->preview.link(camOut->input); // Link nodes CAM -> imageManip -> XLINK - colorCam->preview.link(imageManip->inputImage); + camRgb->preview.link(imageManip->inputImage); imageManip->out.link(manipOut->input); // ImageManip -> ImageManip 2 diff --git a/examples/src/20_image_manip_warp.cpp b/examples/src/image_manip_warp.cpp similarity index 100% rename from examples/src/20_image_manip_warp.cpp rename to examples/src/image_manip_warp.cpp diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index 15c4c7bdc..3101e90ce 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -13,7 +13,7 @@ int main(int argc, char** argv) { dai::Pipeline p; - auto colorCam = p.create(); + auto camRgb = p.create(); auto xout = p.create(); auto xout2 = p.create(); auto videnc = p.create(); @@ -23,17 +23,17 @@ int main(int argc, char** argv) { xout2->setStreamName("preview"); // ColorCamera - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - // colorCam->setFps(5.0); - colorCam->setInterleaved(true); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + // camRgb->setFps(5.0); + camRgb->setInterleaved(true); // VideoEncoder videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::MJPEG); // Link plugins CAM -> XLINK - colorCam->video.link(videnc->input); - colorCam->preview.link(xout2->input); + 
camRgb->video.link(videnc->input); + camRgb->preview.link(xout2->input); videnc->bitstream.link(xout->input); // Connect to device with above created pipeline diff --git a/examples/src/14_2_mono_camera_control.cpp b/examples/src/mono_camera_control.cpp similarity index 100% rename from examples/src/14_2_mono_camera_control.cpp rename to examples/src/mono_camera_control.cpp diff --git a/examples/src/mono_camera_video.cpp b/examples/src/mono_camera_video.cpp new file mode 100644 index 000000000..838bbd8a6 --- /dev/null +++ b/examples/src/mono_camera_video.cpp @@ -0,0 +1,48 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + using namespace std; + + // Create pipeline + dai::Pipeline pipeline; + + // Define source and output + auto monoLeft = pipeline.create(); + auto xoutVideo = pipeline.create(); + + xoutVideo->setStreamName("video"); + + // Properties + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + + xoutVideo->input.setBlocking(false); + xoutVideo->input.setQueueSize(1); + + // Linking + monoLeft->out.link(xoutVideo->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + auto video = device.getOutputQueue("video"); + + while(true) { + auto videoIn = video->get(); + + // Get BGR frame from NV12 encoded video frame to show with opencv + // Visualizing the frame on slower hosts might have overhead + cv::imshow("video", videoIn->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/10_mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp similarity index 100% rename from examples/src/10_mono_depth_mobilenetssd.cpp rename to examples/src/mono_depth_mobilenetssd.cpp diff --git a/examples/src/07_mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp similarity index 100% rename from examples/src/07_mono_full_resolution_saver.cpp rename to examples/src/mono_full_resolution_saver.cpp diff --git a/examples/src/09_mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp similarity index 100% rename from examples/src/09_mono_mobilenet.cpp rename to examples/src/mono_mobilenet.cpp diff --git a/examples/src/02_mono_preview.cpp b/examples/src/mono_preview.cpp similarity index 100% rename from examples/src/02_mono_preview.cpp rename to examples/src/mono_preview.cpp diff --git a/examples/src/29_1_object_tracker.cpp b/examples/src/object_tracker.cpp similarity index 60% rename from examples/src/29_1_object_tracker.cpp rename to examples/src/object_tracker.cpp index e55403d89..1d4608520 100644 --- a/examples/src/29_1_object_tracker.cpp +++ b/examples/src/object_tracker.cpp @@ -11,78 +11,78 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic syncNN{true}; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; - auto 
colorCam = p.create(); - auto xlinkOut = p.create(); - auto detectionNetwork = p.create(); - auto objectTracker = p.create(); - auto trackerOut = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto detectionNetwork = pipeline.create(); + auto objectTracker = pipeline.create(); + + auto xlinkOut = pipeline.create(); + auto trackerOut = pipeline.create(); xlinkOut->setStreamName("preview"); trackerOut->setStreamName("tracklets"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); - colorCam->setFps(40); + // Properties + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setFps(40); // testing MobileNet DetectionNetwork - detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); + detectionNetwork->setConfidenceThreshold(0.5f); + detectionNetwork->input.setBlocking(false); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(detectionNetwork->input); + objectTracker->setDetectionLabelsToTrack({15}); // track only person + // possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS + objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + // take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID + objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); + + // Linking + camRgb->preview.link(detectionNetwork->input); if(syncNN) { objectTracker->passthroughTrackerFrame.link(xlinkOut->input); } else { - colorCam->preview.link(xlinkOut->input); + camRgb->preview.link(xlinkOut->input); } - objectTracker->setDetectionLabelsToTrack({15}); // track only person - objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); - objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); - detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); detectionNetwork->out.link(objectTracker->inputDetections); objectTracker->out.link(trackerOut->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto preview = d.getOutputQueue("preview", 4, false); - auto tracklets = d.getOutputQueue("tracklets", 4, false); + auto preview = device.getOutputQueue("preview", 4, false); + auto tracklets = device.getOutputQueue("tracklets", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - while(1) { + while(true) { auto imgFrame = preview->get(); auto track = tracklets->get(); @@ -96,10 +96,10 @@ int main(int argc, char** argv) { } auto color = cv::Scalar(255, 0, 0); - cv::Mat trackletFrame = imgFrame->getCvFrame(); + cv::Mat frame = 
imgFrame->getCvFrame(); auto trackletsData = track->tracklets; for(auto& t : trackletsData) { - auto roi = t.roi.denormalize(trackletFrame.cols, trackletFrame.rows); + auto roi = t.roi.denormalize(frame.cols, frame.rows); int x1 = roi.topLeft().x; int y1 = roi.topLeft().y; int x2 = roi.bottomRight().x; @@ -110,29 +110,28 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(trackletFrame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream idStr; idStr << "ID: " << t.id; - cv::putText(trackletFrame, idStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream statusStr; statusStr << "Status: " << t.status; - cv::putText(trackletFrame, statusStr.str(), cv::Point(x1 + 10, y1 + 60), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 60), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(trackletFrame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(trackletFrame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); + fpsStr << "NN fps:" << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); - cv::imshow("tracker", trackletFrame); + cv::imshow("tracker", frame); int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; -} \ No newline at end of file +} diff --git a/examples/src/24_opencv_support.cpp b/examples/src/opencv_support.cpp similarity index 100% rename from examples/src/24_opencv_support.cpp rename to examples/src/opencv_support.cpp diff --git a/examples/src/rgb_camera_video.cpp b/examples/src/rgb_camera_video.cpp new file mode 100644 index 000000000..1c58728fe --- /dev/null +++ b/examples/src/rgb_camera_video.cpp @@ -0,0 +1,50 @@ +#include +#include + +// Inludes common necessary includes for development using depthai library +#include "depthai/depthai.hpp" + +int main() { + using namespace std; + + // Create pipeline + dai::Pipeline pipeline; + + // Define source and output + auto camRgb = pipeline.create(); + auto xoutVideo = pipeline.create(); + + xoutVideo->setStreamName("video"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setVideoSize(1920, 1080); + + xoutVideo->input.setBlocking(false); + xoutVideo->input.setQueueSize(1); + + // Linking + camRgb->video.link(xoutVideo->input); + + // Connect to device with above created pipeline + dai::Device device(pipeline); + // Start the pipeline + device.startPipeline(); + + auto video = device.getOutputQueue("video"); + + while(true) { + auto videoIn = video->get(); + + // Get BGR frame from NV12 encoded video frame to show with opencv + // Visualizing the frame on slower hosts might have overhead + cv::imshow("video", videoIn->getCvFrame()); + + int key = cv::waitKey(1); + if(key == 'q' || key == 
'Q') { + return 0; + } + } + return 0; +} diff --git a/examples/src/rgb_depth_aligned_example.cpp b/examples/src/rgb_depth_aligned_example.cpp index 76cfb496c..02918301a 100644 --- a/examples/src/rgb_depth_aligned_example.cpp +++ b/examples/src/rgb_depth_aligned_example.cpp @@ -13,51 +13,54 @@ static constexpr bool downscaleColor = true; int main() { using namespace std; - dai::Pipeline p; + dai::Pipeline pipeline; std::vector queueNames; - auto cam = p.create(); - cam->setBoardSocket(dai::CameraBoardSocket::RGB); - cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - if(downscaleColor) cam->setIspScale(2, 3); - // For now, RGB needs fixed focus to properly align with depth. - // This value was used during calibration - cam->initialControl.setManualFocus(130); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto left = pipeline.create(); + auto right = pipeline.create(); + auto stereo = pipeline.create(); + + auto rgbOut = pipeline.create(); + auto depthOut = pipeline.create(); - auto rgbOut = p.create(); rgbOut->setStreamName("rgb"); queueNames.push_back("rgb"); - cam->isp.link(rgbOut->input); + depthOut->setStreamName("depth"); + queueNames.push_back("depth"); + + // Properties + camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + if(downscaleColor) camRgb->setIspScale(2, 3); + // For now, RGB needs fixed focus to properly align with depth. + // This value was used during calibration + camRgb->initialControl.setManualFocus(130); - auto left = p.create(); left->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); left->setBoardSocket(dai::CameraBoardSocket::LEFT); - - auto right = p.create(); right->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); right->setBoardSocket(dai::CameraBoardSocket::RIGHT); - auto stereo = p.create(); stereo->setConfidenceThreshold(200); // LR-check is required for depth alignment stereo->setLeftRightCheck(true); stereo->setDepthAlign(dai::CameraBoardSocket::RGB); + + // Linking + camRgb->isp.link(rgbOut->input); left->out.link(stereo->left); right->out.link(stereo->right); - - auto depthOut = p.create(); - depthOut->setStreamName("depth"); - queueNames.push_back("depth"); - // Currently we use the 'disparity' output. 
TODO 'depth' stereo->disparity.link(depthOut->input); // Connect to device - dai::Device d(p); - d.startPipeline(); + dai::Device device(pipeline); + device.startPipeline(); // Sets queues size and behavior for(const auto& name : queueNames) { - d.getOutputQueue(name, 4, false); + device.getOutputQueue(name, 4, false); } std::unordered_map frame; @@ -65,9 +68,9 @@ int main() { while(1) { std::unordered_map> latestPacket; - auto queueEvents = d.getQueueEvents(queueNames); + auto queueEvents = device.getQueueEvents(queueNames); for(const auto& name : queueEvents) { - auto packets = d.getOutputQueue(name)->tryGetAll(); + auto packets = device.getOutputQueue(name)->tryGetAll(); auto count = packets.size(); if(count > 0) { latestPacket[name] = packets[count - 1]; @@ -103,8 +106,9 @@ int main() { } int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } + return 0; } diff --git a/examples/src/04_rgb_encoding.cpp b/examples/src/rgb_encoding.cpp similarity index 100% rename from examples/src/04_rgb_encoding.cpp rename to examples/src/rgb_encoding.cpp diff --git a/examples/src/18_rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp similarity index 100% rename from examples/src/18_rgb_encoding_mobilenet.cpp rename to examples/src/rgb_encoding_mobilenet.cpp diff --git a/examples/src/11_rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp similarity index 100% rename from examples/src/11_rgb_encoding_mono_mobilenet.cpp rename to examples/src/rgb_encoding_mono_mobilenet.cpp diff --git a/examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp similarity index 100% rename from examples/src/12_rgb_encoding_mono_mobilenet_depth.cpp rename to examples/src/rgb_encoding_mono_mobilenet_depth.cpp diff --git a/examples/src/06_rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp similarity index 100% rename from examples/src/06_rgb_full_resolution_saver.cpp rename to examples/src/rgb_full_resolution_saver.cpp diff --git a/examples/src/08_rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp similarity index 100% rename from examples/src/08_rgb_mobilenet.cpp rename to examples/src/rgb_mobilenet.cpp diff --git a/examples/src/15_rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp similarity index 100% rename from examples/src/15_rgb_mobilenet_4k.cpp rename to examples/src/rgb_mobilenet_4k.cpp diff --git a/examples/src/05_rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp similarity index 100% rename from examples/src/05_rgb_mono_encoding.cpp rename to examples/src/rgb_mono_encoding.cpp diff --git a/examples/src/01_rgb_preview.cpp b/examples/src/rgb_preview.cpp similarity index 100% rename from examples/src/01_rgb_preview.cpp rename to examples/src/rgb_preview.cpp diff --git a/examples/src/27_spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp similarity index 85% rename from examples/src/27_spatial_location_calculator.cpp rename to examples/src/spatial_location_calculator.cpp index 294c33d93..0f3c4a300 100644 --- a/examples/src/27_spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -1,5 +1,3 @@ - - #include #include @@ -10,26 +8,29 @@ static constexpr float stepSize = 0.05; +static std::atomic newConfig{false}; + int main() { using namespace std; - dai::Pipeline p; + // Start defining a pipeline + dai::Pipeline pipeline; - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = 
p.create(); - auto spatialDataCalculator = p.create(); + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto spatialDataCalculator = pipeline.create(); - auto xoutDepth = p.create(); - auto xoutSpatialData = p.create(); - auto xinSpatialCalcConfig = p.create(); + auto xoutDepth = pipeline.create(); + auto xoutSpatialData = pipeline.create(); + auto xinSpatialCalcConfig = pipeline.create(); - // XLinkOut xoutDepth->setStreamName("depth"); xoutSpatialData->setStreamName("spatialData"); xinSpatialCalcConfig->setStreamName("spatialCalcConfig"); - // MonoCamera + // Properties monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); @@ -38,34 +39,34 @@ int main() { bool lrcheck = false; bool subpixel = false; - // StereoDepth stereo->setConfidenceThreshold(255); - - // stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); stereo->setLeftRightCheck(lrcheck); stereo->setSubpixel(subpixel); - // Link plugins CAM -> STEREO -> XLINK - monoLeft->out.link(stereo->left); - monoRight->out.link(stereo->right); - - spatialDataCalculator->passthroughDepth.link(xoutDepth->input); - stereo->depth.link(spatialDataCalculator->inputDepth); - + // Config dai::Point2f topLeft(0.4f, 0.4f); dai::Point2f bottomRight(0.6f, 0.6f); - spatialDataCalculator->setWaitForConfigInput(false); dai::SpatialLocationCalculatorConfigData config; config.depthThresholds.lowerThreshold = 100; config.depthThresholds.upperThreshold = 10000; config.roi = dai::Rect(topLeft, bottomRight); + + spatialDataCalculator->setWaitForConfigInput(false); spatialDataCalculator->initialConfig.addROI(config); + + // Linking + monoLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); + + spatialDataCalculator->passthroughDepth.link(xoutDepth->input); + stereo->depth.link(spatialDataCalculator->inputDepth); + spatialDataCalculator->out.link(xoutSpatialData->input); xinSpatialCalcConfig->out.link(spatialDataCalculator->inputConfig); // CONNECT TO DEVICE - dai::Device d(p); + dai::Device d(pipeline); d.startPipeline(); auto depthQueue = d.getOutputQueue("depth", 8, false); @@ -76,10 +77,10 @@ int main() { auto color = cv::Scalar(255, 255, 255); std::cout << "Use WASD keys to move ROI!" 
<< std::endl; - while(1) { - auto depth = depthQueue->get(); + while(true) { + auto inDepth = depthQueue->get(); - cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrame = inDepth->getFrame(); cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); @@ -108,7 +109,6 @@ int main() { cv::imshow("depth", depthFrameColor); - bool newConfig = false; int key = cv::waitKey(1); switch(key) { case 'q': @@ -145,11 +145,14 @@ int main() { default: break; } + if(newConfig) { config.roi = dai::Rect(topLeft, bottomRight); dai::SpatialLocationCalculatorConfig cfg; cfg.addROI(config); spatialCalcConfigInQueue->send(cfg); + newConfig = false; } } + return 0; } diff --git a/examples/src/26_1_spatial_mobilenet.cpp b/examples/src/spatial_mobilenet.cpp similarity index 100% rename from examples/src/26_1_spatial_mobilenet.cpp rename to examples/src/spatial_mobilenet.cpp diff --git a/examples/src/26_2_spatial_mobilenet_mono.cpp b/examples/src/spatial_mobilenet_mono.cpp similarity index 65% rename from examples/src/26_2_spatial_mobilenet_mono.cpp rename to examples/src/spatial_mobilenet_mono.cpp index db1c67f86..ea749922f 100644 --- a/examples/src/26_2_spatial_mobilenet_mono.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -11,32 +11,47 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; -static bool flipRectified = true; +static std::atomic syncNN{true}; +static std::atomic flipRectified{true}; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); - auto xlinkOut = p.create(); - auto spatialDetectionNetwork = p.create(); - auto imageManip = p.create(); + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto imageManip = pipeline.create(); - auto nnOut = p.create(); - auto depthRoiMap = p.create(); - auto xoutDepth = p.create(); + auto xoutManip = pipeline.create(); + auto nnOut = pipeline.create(); + auto depthRoiMap = pipeline.create(); + auto xoutDepth = pipeline.create(); - xlinkOut->setStreamName("preview"); + xoutManip->setStreamName("right"); nnOut->setStreamName("detections"); depthRoiMap->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); + // Properties imageManip->initialConfig.setResize(300, 300); // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) imageManip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); @@ -45,71 +60,51 @@ dai::Pipeline createPipeline(std::string nnPath) { // StereoDepth stereo->setConfidenceThreshold(255); - // Link plugins CAM -> STEREO -> XLINK - monoLeft->out.link(stereo->left); - monoRight->out.link(stereo->right); - - stereo->rectifiedRight.link(imageManip->inputImage); - - // testing MobileNet DetectionNetwork + //Define a neural network that will make predictions based on the source frames spatialDetectionNetwork->setConfidenceThreshold(0.5f); - spatialDetectionNetwork->setBoundingBoxScaleFactor(0.7); + spatialDetectionNetwork->setBlobPath(nnPath); + spatialDetectionNetwork->input.setBlocking(false); + spatialDetectionNetwork->setBoundingBoxScaleFactor(0.5); spatialDetectionNetwork->setDepthLowerThreshold(100); spatialDetectionNetwork->setDepthUpperThreshold(5000); - spatialDetectionNetwork->setBlobPath(nnPath); + // Linking + monoLeft->out.link(stereo->left); + monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK imageManip->out.link(spatialDetectionNetwork->input); if(syncNN) - spatialDetectionNetwork->passthrough.link(xlinkOut->input); + spatialDetectionNetwork->passthrough.link(xoutManip->input); else - imageManip->out.link(xlinkOut->input); + imageManip->out.link(xoutManip->input); spatialDetectionNetwork->out.link(nnOut->input); spatialDetectionNetwork->boundingBoxMapping.link(depthRoiMap->input); + stereo->rectifiedRight.link(imageManip->inputImage); stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto depthRoiMap = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + // Output queues will be used to get the rgb frames and nn data from the outputs defined above + auto previewQueue = device.getOutputQueue("right", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto depthRoiMapQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto inRectifiedRight = preview->get(); - auto det = detections->get(); - auto depth = depthQueue->get(); + while(true) { + auto inRectified = previewQueue->get(); + auto inDet = detectionNNQueue->get(); + auto inDepth = 
depthQueue->get(); counter++; auto currentTime = steady_clock::now(); @@ -120,16 +115,18 @@ int main(int argc, char** argv) { startTime = currentTime; } - auto dets = det->detections; + cv::Mat rectifiedRight = inRectified->getCvFrame(); + if(flipRectified) cv::flip(rectifiedRight, rectifiedRight, 1); - cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrame = inDepth->getFrame(); cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = depthRoiMap->get(); + auto detections = inDet->detections; + if(!detections.empty()) { + auto boundingBoxMapping = depthRoiMapQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -146,39 +143,35 @@ int main(int argc, char** argv) { } } - cv::Mat rectifiedRight = inRectifiedRight->getCvFrame(); - - if(flipRectified) cv::flip(rectifiedRight, rectifiedRight, 1); - - for(auto& d : dets) { + for(auto& detection : detections) { if(flipRectified) { - auto swap = d.xmin; - d.xmin = 1 - d.xmax; - d.xmax = 1 - swap; + auto swap = detection.xmin; + detection.xmin = 1 - detection.xmax; + detection.xmax = 1 - swap; } - int x1 = d.xmin * rectifiedRight.cols; - int y1 = d.ymin * rectifiedRight.rows; - int x2 = d.xmax * rectifiedRight.cols; - int y2 = d.ymax * rectifiedRight.rows; + int x1 = detection.xmin * rectifiedRight.cols; + int y1 = detection.ymin * rectifiedRight.rows; + int x2 = detection.xmax * rectifiedRight.cols; + int y2 = detection.ymax * rectifiedRight.rows; - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } cv::putText(rectifiedRight, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; cv::putText(rectifiedRight, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; cv::putText(rectifiedRight, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; cv::putText(rectifiedRight, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; cv::putText(rectifiedRight, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); cv::rectangle(rectifiedRight, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); @@ -190,11 +183,11 @@ int main(int argc, char** argv) { cv::imshow("depth", depthFrameColor); cv::imshow("rectified right", rectifiedRight); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key =='Q') { return 0; } } - return 0; } diff --git a/examples/src/29_2_spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp similarity index 93% rename from 
examples/src/29_2_spatial_object_tracker.cpp rename to examples/src/spatial_object_tracker.cpp index e720bb415..ca83f1fa4 100644 --- a/examples/src/29_2_spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -17,7 +17,7 @@ dai::Pipeline createPipeline(std::string nnPath) { dai::Pipeline p; // create nodes - auto colorCam = p.create(); + auto camRgb = p.create(); auto spatialDetectionNetwork = p.create(); auto monoLeft = p.create(); auto monoRight = p.create(); @@ -31,10 +31,10 @@ dai::Pipeline createPipeline(std::string nnPath) { xoutRgb->setStreamName("preview"); trackerOut->setStreamName("tracklets"); - colorCam->setPreviewSize(300, 300); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + camRgb->setPreviewSize(300, 300); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); @@ -56,11 +56,11 @@ dai::Pipeline createPipeline(std::string nnPath) { monoRight->out.link(stereo->right); // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); + camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) { objectTracker->passthroughTrackerFrame.link(xoutRgb->input); } else { - colorCam->preview.link(xoutRgb->input); + camRgb->preview.link(xoutRgb->input); } objectTracker->setDetectionLabelsToTrack({15}); // track only person diff --git a/examples/src/26_3_spatial_tiny_yolo.cpp b/examples/src/spatial_tiny_yolo.cpp similarity index 72% rename from examples/src/26_3_spatial_tiny_yolo.cpp rename to examples/src/spatial_tiny_yolo.cpp index 81f1dda79..4254a6d59 100644 --- a/examples/src/26_3_spatial_tiny_yolo.cpp +++ b/examples/src/spatial_tiny_yolo.cpp @@ -20,31 +20,44 @@ static const std::vector labelMap = { static bool syncNN = true; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; - - // create nodes - auto colorCam = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); - - // create xlink connections - auto xoutRgb = p.create(); - auto xoutNN = p.create(); - auto xoutBoundingBoxDepthMapping = p.create(); - auto xoutDepth = p.create(); - - xoutRgb->setStreamName("preview"); +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); + + // Create pipeline + dai::Pipeline pipeline; + + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + + auto xoutRgb = pipeline.create(); + auto xoutNN = pipeline.create(); + auto xoutBoundingBoxDepthMapping = pipeline.create(); + auto xoutDepth = pipeline.create(); + + xoutRgb->setStreamName("rgb"); xoutNN->setStreamName("detections"); xoutBoundingBoxDepthMapping->setStreamName("boundingBoxDepthMapping"); xoutDepth->setStreamName("depth"); - 
colorCam->setPreviewSize(416, 416); - colorCam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - colorCam->setInterleaved(false); - colorCam->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); + // Properties + camRgb->setPreviewSize(416, 416); + camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); + camRgb->setInterleaved(false); + camRgb->setColorOrder(dai::ColorCameraProperties::ColorOrder::BGR); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); @@ -65,19 +78,18 @@ dai::Pipeline createPipeline(std::string nnPath) { spatialDetectionNetwork->setNumClasses(80); spatialDetectionNetwork->setCoordinateSize(4); spatialDetectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); - spatialDetectionNetwork->setAnchorMasks({{"side13", {3, 4, 5}}, {"side26", {1, 2, 3}}}); + spatialDetectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); spatialDetectionNetwork->setIouThreshold(0.5f); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK - colorCam->preview.link(spatialDetectionNetwork->input); + camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) spatialDetectionNetwork->passthrough.link(xoutRgb->input); else - colorCam->preview.link(xoutRgb->input); + camRgb->preview.link(xoutRgb->input); spatialDetectionNetwork->out.link(xoutNN->input); spatialDetectionNetwork->boundingBoxMapping.link(xoutBoundingBoxDepthMapping->input); @@ -85,55 +97,45 @@ dai::Pipeline createPipeline(std::string nnPath) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = createPipeline(nnPath); - // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); // Start the pipeline - d.startPipeline(); + device.startPipeline(); - auto preview = d.getOutputQueue("preview", 4, false); - auto detections = d.getOutputQueue("detections", 4, false); - auto xoutBoundingBoxDepthMapping = d.getOutputQueue("boundingBoxDepthMapping", 4, false); - auto depthQueue = d.getOutputQueue("depth", 4, false); + auto previewQueue = device.getOutputQueue("rgb", 4, false); + auto detectionNNQueue = device.getOutputQueue("detections", 4, false); + auto xoutBoundingBoxDepthMappingQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, false); + auto depthQueue = device.getOutputQueue("depth", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 255, 255); - while(1) { - auto imgFrame = preview->get(); - auto det = detections->get(); + while(true) { + auto imgFrame = previewQueue->get(); + auto inDet = detectionNNQueue->get(); auto depth = depthQueue->get(); - auto dets = det->detections; - + cv::Mat frame = imgFrame->getCvFrame(); cv::Mat depthFrame = depth->getFrame(); cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, 
depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); - if(!dets.empty()) { - auto boundingBoxMapping = xoutBoundingBoxDepthMapping->get(); + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + + auto detections = inDet->detections; + if(!detections.empty()) { + auto boundingBoxMapping = xoutBoundingBoxDepthMappingQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); for(auto roiData : roiDatas) { @@ -149,41 +151,31 @@ int main(int argc, char** argv) { cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - cv::Mat frame = imgFrame->getCvFrame(); + for(const auto& detection : detections) { + int x1 = detection.xmin * frame.cols; + int y1 = detection.ymin * frame.rows; + int x2 = detection.xmax * frame.cols; + int y2 = detection.ymax * frame.rows; - for(const auto& d : dets) { - int x1 = d.xmin * frame.cols; - int y1 = d.ymin * frame.rows; - int x2 = d.xmax * frame.cols; - int y2 = d.ymax * frame.rows; - - int labelIndex = d.label; + int labelIndex = detection.label; std::string labelStr = to_string(labelIndex); if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << d.confidence * 100; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthX; - depthX << "X: " << (int)d.spatialCoordinates.x << " mm"; + depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthY; - depthY << "Y: " << (int)d.spatialCoordinates.y << " mm"; + depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); std::stringstream depthZ; - depthZ << "Z: " << (int)d.spatialCoordinates.z << " mm"; + depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); @@ -194,12 +186,12 @@ int main(int argc, char** argv) { cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("depth", depthFrameColor); - cv::imshow("preview", frame); + cv::imshow("rgb", frame); + int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } - return 0; } diff --git a/examples/src/30_2_stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp similarity index 99% rename from examples/src/30_2_stereo_depth_video.cpp rename to examples/src/stereo_depth_video.cpp index ee0a43448..f61fc6382 100644 --- a/examples/src/30_2_stereo_depth_video.cpp +++ 
b/examples/src/stereo_depth_video.cpp @@ -1,5 +1,3 @@ - - #include #include diff --git a/examples/src/25_system_information.cpp b/examples/src/system_information.cpp similarity index 100% rename from examples/src/25_system_information.cpp rename to examples/src/system_information.cpp diff --git a/examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp b/examples/src/tiny_yolo_v3_device_side_decoding.cpp similarity index 100% rename from examples/src/22_1_tiny_yolo_v3_device_side_decoding.cpp rename to examples/src/tiny_yolo_v3_device_side_decoding.cpp diff --git a/examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp similarity index 100% rename from examples/src/22_2_tiny_yolo_v4_device_side_decoding.cpp rename to examples/src/tiny_yolo_v4_device_side_decoding.cpp diff --git a/examples/src/17_video_mobilenet.cpp b/examples/src/video_mobilenet.cpp similarity index 100% rename from examples/src/17_video_mobilenet.cpp rename to examples/src/video_mobilenet.cpp From 14f6b6ee9066c509cc4a344a03135fe917e072a6 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 3 May 2021 19:07:17 +0300 Subject: [PATCH 23/56] Disable median filter to avoid warning --- examples/src/stereo_depth_video.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/src/stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp index 3ac531044..a95b3da56 100644 --- a/examples/src/stereo_depth_video.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -59,7 +59,7 @@ int main() { // stereo->loadCalibrationFile("../../../../depthai/resources/depthai.calib"); // stereo->setInputResolution(1280, 720); // TODO: median filtering is disabled on device with (lrcheck || extended || subpixel) - // stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); + stereo->setMedianFilter(dai::StereoDepthProperties::MedianFilter::MEDIAN_OFF); stereo->setLeftRightCheck(lrcheck); stereo->setExtendedDisparity(extended); stereo->setSubpixel(subpixel); From 58a771356f6b7a24d5497f8faf25f1144dcf29ff Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 3 May 2021 23:19:17 +0300 Subject: [PATCH 24/56] Remove whitespaces --- .../src/camera_mobilenet_sync_example.cpp | 2 + examples/src/device_queue_event.cpp | 1 - examples/src/encoding_max_limit.cpp | 30 ++++---- examples/src/image_manip_example.cpp | 4 +- examples/src/mjpeg_encoding_example.cpp | 17 ++--- examples/src/mono_depth_mobilenetssd.cpp | 1 - examples/src/mono_mobilenet.cpp | 1 - examples/src/object_tracker.cpp | 4 +- examples/src/opencv_support.cpp | 4 +- examples/src/rgb_camera_video.cpp | 1 - examples/src/rgb_depth_aligned_example.cpp | 2 +- examples/src/rgb_encoding_mobilenet.cpp | 2 - examples/src/rgb_encoding_mono_mobilenet.cpp | 1 - .../src/rgb_encoding_mono_mobilenet_depth.cpp | 1 - examples/src/rgb_mobilenet.cpp | 1 - examples/src/rgb_mobilenet_4k.cpp | 1 - examples/src/spatial_location_calculator.cpp | 10 +-- examples/src/spatial_object_tracker.cpp | 75 +++++++++---------- examples/src/stereo_depth_video.cpp | 39 +++++----- examples/src/video_mobilenet.cpp | 1 - examples/src/webcam_mobilenet_example.cpp | 16 ++-- 21 files changed, 98 insertions(+), 116 deletions(-) diff --git a/examples/src/camera_mobilenet_sync_example.cpp b/examples/src/camera_mobilenet_sync_example.cpp index 685f7ded0..e6220f0a8 100644 --- a/examples/src/camera_mobilenet_sync_example.cpp +++ b/examples/src/camera_mobilenet_sync_example.cpp @@ -21,8 +21,10 @@ int main(int argc, char** argv) { nnPath = 
std::string(argv[1]); } + // Create pipeline dai::Pipeline pipeline; + // Define sources and outputs auto camRgb = pipeline.create(); auto nn = pipeline.create(); auto camOut = pipeline.create(); diff --git a/examples/src/device_queue_event.cpp b/examples/src/device_queue_event.cpp index f8ba0e57f..e63dd2ca4 100644 --- a/examples/src/device_queue_event.cpp +++ b/examples/src/device_queue_event.cpp @@ -1,4 +1,3 @@ - #include #include #include "utility.hpp" diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp index f5e15d2a0..80ef00b9d 100644 --- a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -16,19 +16,19 @@ int main(int argc, char** argv) { using namespace std::chrono; std::signal(SIGINT, &sigintHandler); - dai::Pipeline p; + dai::Pipeline pipeline; // Define a source - color camera and 2 mono camera - auto camRgb = p.create(); - auto monoCam = p.create(); - auto monoCam2 = p.create(); - auto ve1 = p.create(); - auto ve2 = p.create(); - auto ve3 = p.create(); + auto camRgb = pipeline.create(); + auto monoCam = pipeline.create(); + auto monoCam2 = pipeline.create(); + auto ve1 = pipeline.create(); + auto ve2 = pipeline.create(); + auto ve3 = pipeline.create(); - auto ve1Out = p.create(); - auto ve2Out = p.create(); - auto ve3Out = p.create(); + auto ve1Out = pipeline.create(); + auto ve2Out = pipeline.create(); + auto ve3Out = pipeline.create(); ve1Out->setStreamName("ve1Out"); ve2Out->setStreamName("ve2Out"); @@ -55,14 +55,14 @@ int main(int argc, char** argv) { ve3->bitstream.link(ve3Out->input); // Pipeline is defined, now we can connect to the device - dai::Device d(p); + dai::Device device(pipeline); // Start pipeline - d.startPipeline(); + device.startPipeline(); // Output queues will be used to get the encoded data from the output defined above - auto outQ1 = d.getOutputQueue("ve1Out", 30, true); - auto outQ2 = d.getOutputQueue("ve2Out", 30, true); - auto outQ3 = d.getOutputQueue("ve3Out", 30, true); + auto outQ1 = device.getOutputQueue("ve1Out", 30, true); + auto outQ2 = device.getOutputQueue("ve2Out", 30, true); + auto outQ3 = device.getOutputQueue("ve3Out", 30, true); // The .h264 / .h265 files are raw stream files (not playable yet) auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); diff --git a/examples/src/image_manip_example.cpp b/examples/src/image_manip_example.cpp index ab3a70b0b..866077ce9 100644 --- a/examples/src/image_manip_example.cpp +++ b/examples/src/image_manip_example.cpp @@ -1,5 +1,3 @@ - - #include #include @@ -66,6 +64,7 @@ int main() { int frameCounter = 0; float xmin = 0.1f; float xmax = 0.3f; + while(true) { xmin += 0.003f; xmax += 0.003f; @@ -99,4 +98,5 @@ int main() { frameCounter++; } + return 0; } diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index 177144608..d365e0899 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -1,4 +1,3 @@ - #include #include @@ -11,12 +10,12 @@ int main(int argc, char** argv) { using namespace std; using namespace std::chrono; - dai::Pipeline p; + dai::Pipeline pipeline; - auto camRgb = p.create(); - auto xout = p.create(); - auto xout2 = p.create(); - auto videnc = p.create(); + auto camRgb = pipeline.create(); + auto xout = pipeline.create(); + auto xout2 = pipeline.create(); + auto videnc = pipeline.create(); // XLinkOut xout->setStreamName("mjpeg"); @@ -37,10 +36,10 @@ int main(int argc, char** argv) { videnc->bitstream.link(xout->input); // 
Connect and start the pipeline - dai::Device d(p); + dai::Device device(pipeline); - auto mjpegQueue = d.getOutputQueue("mjpeg", 8, false); - auto previewQueue = d.getOutputQueue("preview", 8, false); + auto mjpegQueue = device.getOutputQueue("mjpeg", 8, false); + auto previewQueue = device.getOutputQueue("preview", 8, false); while(1) { auto t1 = steady_clock::now(); auto preview = previewQueue->get(); diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 90ea6cd59..0f4e620d3 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -75,7 +75,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Output queues will be used to get the grayscale / depth frames and nn data from the outputs defined above auto qRight = device.getOutputQueue("rectifiedRight", 4, false); auto qDisparity = device.getOutputQueue("disparity", 4, false); diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp index 191f84e50..c15685340 100644 --- a/examples/src/mono_mobilenet.cpp +++ b/examples/src/mono_mobilenet.cpp @@ -59,7 +59,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Output queues will be used to get the grayscale frames and nn data from the outputs defined above auto qRight = device.getOutputQueue("right", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); diff --git a/examples/src/object_tracker.cpp b/examples/src/object_tracker.cpp index f8da085c4..c34fbb599 100644 --- a/examples/src/object_tracker.cpp +++ b/examples/src/object_tracker.cpp @@ -2,8 +2,6 @@ #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -74,13 +72,13 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - auto preview = device.getOutputQueue("preview", 4, false); auto tracklets = device.getOutputQueue("tracklets", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; + while(true) { auto imgFrame = preview->get(); auto track = tracklets->get(); diff --git a/examples/src/opencv_support.cpp b/examples/src/opencv_support.cpp index c6bf0f162..fa6321325 100644 --- a/examples/src/opencv_support.cpp +++ b/examples/src/opencv_support.cpp @@ -36,8 +36,6 @@ int main() { // Connect to device with above created pipeline dai::Device device(pipeline); - - auto video = device.getOutputQueue("video"); auto preview = device.getOutputQueue("preview"); @@ -56,4 +54,4 @@ int main() { return 0; } return 0; -} \ No newline at end of file +} diff --git a/examples/src/rgb_camera_video.cpp b/examples/src/rgb_camera_video.cpp index 8f396dfd7..00146e0aa 100644 --- a/examples/src/rgb_camera_video.cpp +++ b/examples/src/rgb_camera_video.cpp @@ -30,7 +30,6 @@ int main() { // Connect to device with above created pipeline dai::Device device(pipeline); - auto video = device.getOutputQueue("video"); while(true) { diff --git a/examples/src/rgb_depth_aligned_example.cpp b/examples/src/rgb_depth_aligned_example.cpp index 8dd4a74bc..0474dd15e 100644 --- a/examples/src/rgb_depth_aligned_example.cpp +++ b/examples/src/rgb_depth_aligned_example.cpp @@ -64,7 +64,7 @@ int main() { std::unordered_map frame; - while(1) { + while(true) { std::unordered_map> latestPacket; auto queueEvents = device.getQueueEvents(queueNames); 
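For context, the hunks above touch the event-driven read-out used by rgb_depth_aligned_example.cpp: getQueueEvents() blocks until one of the named queues has data, and tryGetAll() then drains that queue so only the newest packet is kept. A minimal sketch of that pattern, assuming a dai::Device already running a pipeline with XLinkOut streams named "rgb" and "depth"; the stream names, helper name and window handling are illustrative, not part of the patch.

#include <string>
#include <unordered_map>
#include <vector>

#include <opencv2/opencv.hpp>
#include "depthai/depthai.hpp"

// Sketch only: keep the most recent frame per stream using queue events
int pollLatestFrames(dai::Device& device, const std::vector<std::string>& queueNames) {
    std::unordered_map<std::string, cv::Mat> frames;
    while(true) {
        // Blocks until at least one of the named output queues has a packet
        auto queueEvents = device.getQueueEvents(queueNames);
        for(const auto& name : queueEvents) {
            // Drain the queue and keep only the newest packet to avoid falling behind
            auto packets = device.getOutputQueue(name)->tryGetAll<dai::ImgFrame>();
            if(!packets.empty()) {
                frames[name] = packets.back()->getCvFrame();
            }
        }
        for(const auto& kv : frames) {
            cv::imshow(kv.first, kv.second);
        }
        if(cv::waitKey(1) == 'q') return 0;
    }
}

Draining with tryGetAll() instead of get() is what keeps the displayed RGB and depth frames roughly in step even when the host renders slower than the device produces.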
diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp index dc6d01a2e..67d0dfc24 100644 --- a/examples/src/rgb_encoding_mobilenet.cpp +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -11,7 +11,6 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; - int main(int argc, char** argv) { using namespace std; // Default blob path provided by Hunter private data download @@ -65,7 +64,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Queues int queueSize = 8; auto qRgb = device.getOutputQueue("rgb", queueSize); diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp index 4bc10c359..767a8a16d 100644 --- a/examples/src/rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -73,7 +73,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Queues int queueSize = 8; auto qRight = device.getOutputQueue("right", queueSize); diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index 965bc0fb4..fa1f5f9a9 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -89,7 +89,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Queues int queueSize = 8; auto qRight = device.getOutputQueue("right", queueSize); diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp index 08c7ad069..bff419b26 100644 --- a/examples/src/rgb_mobilenet.cpp +++ b/examples/src/rgb_mobilenet.cpp @@ -64,7 +64,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Output queues will be used to get the rgb frames and nn data from the outputs defined above auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index fb7667c55..9165a2c91 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -60,7 +60,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Queues auto qVideo = device.getOutputQueue("video", 4, false); auto qPreview = device.getOutputQueue("preview", 4, false); diff --git a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index b96a28c1d..316c9b996 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -66,11 +66,11 @@ int main() { xinSpatialCalcConfig->out.link(spatialDataCalculator->inputConfig); // CONNECT TO DEVICE - dai::Device d(pipeline); + dai::Device device(pipeline); - auto depthQueue = d.getOutputQueue("depth", 8, false); - auto spatialCalcQueue = d.getOutputQueue("spatialData", 8, false); - auto spatialCalcConfigInQueue = d.getInputQueue("spatialCalcConfig"); + auto depthQueue = device.getOutputQueue("depth", 8, false); + auto spatialCalcQueue = device.getOutputQueue("spatialData", 8, false); + auto spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig"); cv::Mat depthFrame; auto 
color = cv::Scalar(255, 255, 255); @@ -134,7 +134,7 @@ int main() { newConfig = true; } break; - case 'd': + case 'device': if(bottomRight.x + stepSize <= 1) { topLeft.x += stepSize; bottomRight.x += stepSize; diff --git a/examples/src/spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp index 46cf45b7b..0d02a0c34 100644 --- a/examples/src/spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -13,24 +13,37 @@ static const std::vector labelMap = {"background", "aeroplane", "bi static bool syncNN = true; -dai::Pipeline createPipeline(std::string nnPath) { - dai::Pipeline p; +int main(int argc, char** argv) { + using namespace std; + using namespace std::chrono; + std::string nnPath(BLOB_PATH); + + // If path to blob specified, use that + if(argc > 1) { + nnPath = std::string(argv[1]); + } + + // Print which blob we are using + printf("Using blob at path: %s\n", nnPath.c_str()); - // create nodes - auto camRgb = p.create(); - auto spatialDetectionNetwork = p.create(); - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto stereo = p.create(); - auto objectTracker = p.create(); + // Create pipeline + dai::Pipeline pipeline; - // create xlink connections - auto xoutRgb = p.create(); - auto trackerOut = p.create(); + // Define sources and outputs + auto camRgb = pipeline.create(); + auto spatialDetectionNetwork = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto stereo = pipeline.create(); + auto objectTracker = pipeline.create(); + + auto xoutRgb = pipeline.create(); + auto trackerOut = pipeline.create(); xoutRgb->setStreamName("preview"); trackerOut->setStreamName("tracklets"); + // Properties camRgb->setPreviewSize(300, 300); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); camRgb->setInterleaved(false); @@ -51,11 +64,14 @@ dai::Pipeline createPipeline(std::string nnPath) { spatialDetectionNetwork->setDepthLowerThreshold(100); spatialDetectionNetwork->setDepthUpperThreshold(5000); - // Link plugins CAM -> STEREO -> XLINK + objectTracker->setDetectionLabelsToTrack({15}); // track only person + objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); + + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); - // Link plugins CAM -> NN -> XLINK camRgb->preview.link(spatialDetectionNetwork->input); if(syncNN) { objectTracker->passthroughTrackerFrame.link(xoutRgb->input); @@ -63,9 +79,6 @@ dai::Pipeline createPipeline(std::string nnPath) { camRgb->preview.link(xoutRgb->input); } - objectTracker->setDetectionLabelsToTrack({15}); // track only person - objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); - objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); objectTracker->out.link(trackerOut->input); spatialDetectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); @@ -74,37 +87,19 @@ dai::Pipeline createPipeline(std::string nnPath) { stereo->depth.link(spatialDetectionNetwork->inputDepth); - return p; -} - -int main(int argc, char** argv) { - using namespace std; - using namespace std::chrono; - std::string nnPath(BLOB_PATH); - - // If path to blob specified, use that - if(argc > 1) { - nnPath = std::string(argv[1]); - } - - // Print which blob we are using - printf("Using blob at path: %s\n", nnPath.c_str()); - - // Create pipeline - dai::Pipeline p = 
createPipeline(nnPath); // Connect and start the pipeline - dai::Device d(p); + dai::Device device(pipeline); - auto preview = d.getOutputQueue("preview", 4, false); - auto tracklets = d.getOutputQueue("tracklets", 4, false); + auto preview = device.getOutputQueue("preview", 4, false); + auto tracklets = device.getOutputQueue("tracklets", 4, false); auto startTime = steady_clock::now(); int counter = 0; float fps = 0; auto color = cv::Scalar(255, 0, 0); - while(1) { + while(true) { auto imgFrame = preview->get(); auto track = tracklets->get(); @@ -158,11 +153,11 @@ int main(int argc, char** argv) { cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("tracker", frame); + int key = cv::waitKey(1); if(key == 'q') { return 0; } } - return 0; } diff --git a/examples/src/stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp index a95b3da56..3ffbd4f59 100644 --- a/examples/src/stereo_depth_video.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -12,17 +12,17 @@ int main() { // TODO - split this example into two separate examples bool withDepth = true; - dai::Pipeline p; - - auto monoLeft = p.create(); - auto monoRight = p.create(); - auto xoutLeft = p.create(); - auto xoutRight = p.create(); - auto stereo = withDepth ? p.create() : nullptr; - auto xoutDisp = p.create(); - auto xoutDepth = p.create(); - auto xoutRectifL = p.create(); - auto xoutRectifR = p.create(); + dai::Pipeline pipeline; + + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); + auto xoutLeft = pipeline.create(); + auto xoutRight = pipeline.create(); + auto stereo = withDepth ? pipeline.create() : nullptr; + auto xoutDisp = pipeline.create(); + auto xoutDepth = pipeline.create(); + auto xoutRectifL = pipeline.create(); + auto xoutRectifR = pipeline.create(); // XLinkOut xoutLeft->setStreamName("left"); @@ -86,14 +86,14 @@ int main() { } // Connect and start the pipeline - dai::Device d(p); + dai::Device device(pipeline); - auto leftQueue = d.getOutputQueue("left", 8, false); - auto rightQueue = d.getOutputQueue("right", 8, false); - auto dispQueue = withDepth ? d.getOutputQueue("disparity", 8, false) : nullptr; - auto depthQueue = withDepth ? d.getOutputQueue("depth", 8, false) : nullptr; - auto rectifLeftQueue = withDepth ? d.getOutputQueue("rectified_left", 8, false) : nullptr; - auto rectifRightQueue = withDepth ? d.getOutputQueue("rectified_right", 8, false) : nullptr; + auto leftQueue = device.getOutputQueue("left", 8, false); + auto rightQueue = device.getOutputQueue("right", 8, false); + auto dispQueue = withDepth ? device.getOutputQueue("disparity", 8, false) : nullptr; + auto depthQueue = withDepth ? device.getOutputQueue("depth", 8, false) : nullptr; + auto rectifLeftQueue = withDepth ? device.getOutputQueue("rectified_left", 8, false) : nullptr; + auto rectifRightQueue = withDepth ? device.getOutputQueue("rectified_right", 8, false) : nullptr; while(1) { auto left = leftQueue->get(); @@ -131,8 +131,9 @@ int main() { } int key = cv::waitKey(1); - if(key == 'q') { + if(key == 'q' || key == 'Q') { return 0; } } + return 0; } diff --git a/examples/src/video_mobilenet.cpp b/examples/src/video_mobilenet.cpp index 719fa4436..75664bc30 100644 --- a/examples/src/video_mobilenet.cpp +++ b/examples/src/video_mobilenet.cpp @@ -53,7 +53,6 @@ int main(int argc, char** argv) { // Connect to device with above created pipeline dai::Device device(pipeline); - // Input queue will be used to send video frames to the device. 
auto qIn = device.getInputQueue("inFrame"); // Output queue will be used to get nn data from the video frames. diff --git a/examples/src/webcam_mobilenet_example.cpp b/examples/src/webcam_mobilenet_example.cpp index 3a517ce7a..ef7eea9d3 100644 --- a/examples/src/webcam_mobilenet_example.cpp +++ b/examples/src/webcam_mobilenet_example.cpp @@ -26,11 +26,11 @@ int main(int argc, char** argv) { using namespace std; // CREATE PIPELINE - dai::Pipeline p; + dai::Pipeline pipeline; - auto xin = p.create(); - auto nn = p.create(); - auto xout = p.create(); + auto xin = pipeline.create(); + auto nn = pipeline.create(); + auto xout = pipeline.create(); // Properties nn->setBlobPath(nnPath); @@ -49,11 +49,11 @@ int main(int argc, char** argv) { cv::VideoCapture webcam(camId); // Connect to device with above created pipeline - dai::Device d(p); + dai::Device device(pipeline); cv::Mat frame; - auto in = d.getInputQueue("nn_in"); - auto detections = d.getOutputQueue("nn_out"); + auto in = device.getInputQueue("nn_in"); + auto detections = device.getOutputQueue("nn_out"); while(1) { // data to send further @@ -120,11 +120,11 @@ int main(int argc, char** argv) { } cv::imshow("preview", frame); + int key = cv::waitKey(1); if(key == 'q') { return 0; } } - return 0; } From 3b8caceba496886bcfef6c3bab7c5c0c350f2e03 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Tue, 4 May 2021 19:21:48 +0300 Subject: [PATCH 25/56] Comments --- examples/src/color_camera_control.cpp | 2 +- examples/src/depth_crop_control.cpp | 2 +- examples/src/depth_preview.cpp | 2 +- examples/src/device_queue_event.cpp | 2 +- examples/src/encoding_max_limit.cpp | 4 +--- examples/src/mjpeg_encoding_example.cpp | 4 ++-- examples/src/mono_camera_control.cpp | 2 +- examples/src/mono_camera_video.cpp | 2 +- examples/src/mono_depth_mobilenetssd.cpp | 2 +- examples/src/mono_full_resolution_saver.cpp | 2 +- examples/src/mono_mobilenet.cpp | 2 +- examples/src/mono_preview.cpp | 1 + examples/src/object_tracker.cpp | 2 +- examples/src/opencv_support.cpp | 2 +- examples/src/rgb_camera_video.cpp | 2 +- examples/src/rgb_depth_aligned_example.cpp | 2 +- examples/src/rgb_encoding.cpp | 2 +- examples/src/rgb_encoding_mobilenet.cpp | 2 +- examples/src/rgb_encoding_mono_mobilenet.cpp | 2 +- .../src/rgb_encoding_mono_mobilenet_depth.cpp | 2 +- examples/src/rgb_full_resolution_saver.cpp | 2 +- examples/src/rgb_mobilenet.cpp | 2 +- examples/src/rgb_mobilenet_4k.cpp | 2 +- examples/src/rgb_mono_encoding.cpp | 2 +- examples/src/rgb_preview.cpp | 2 +- examples/src/spatial_location_calculator.cpp | 2 +- examples/src/spatial_mobilenet.cpp | 2 +- examples/src/spatial_mobilenet_mono.cpp | 2 +- examples/src/spatial_object_tracker.cpp | 2 +- examples/src/spatial_tiny_yolo.cpp | 2 +- examples/src/stereo_depth_video.cpp | 4 ++-- examples/src/system_information.cpp | 2 +- .../src/tiny_yolo_v3_device_side_decoding.cpp | 2 +- .../src/tiny_yolo_v4_device_side_decoding.cpp | 2 +- examples/src/video_mobilenet.cpp | 4 ++-- examples/src/webcam_mobilenet_example.cpp | 17 +++++++++-------- 36 files changed, 47 insertions(+), 47 deletions(-) diff --git a/examples/src/color_camera_control.cpp b/examples/src/color_camera_control.cpp index ba1f3f9e2..0d389f47c 100644 --- a/examples/src/color_camera_control.cpp +++ b/examples/src/color_camera_control.cpp @@ -64,7 +64,7 @@ int main() { videoEncoder->bitstream.link(videoMjpegOut->input); stillEncoder->bitstream.link(stillMjpegOut->input); - // Connect to device + // Connect to device and start pipeline dai::Device device(pipeline); // 
Create data queues diff --git a/examples/src/depth_crop_control.cpp b/examples/src/depth_crop_control.cpp index f00f0d8eb..2a24ab441 100644 --- a/examples/src/depth_crop_control.cpp +++ b/examples/src/depth_crop_control.cpp @@ -50,7 +50,7 @@ int main() { monoRight->out.link(stereo->left); monoLeft->out.link(stereo->right); - // Connect to device + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp index 4b324a377..b71f471f2 100644 --- a/examples/src/depth_preview.cpp +++ b/examples/src/depth_preview.cpp @@ -52,7 +52,7 @@ int main() { monoRight->out.link(depth->right); depth->disparity.link(xout->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Output queue will be used to get the disparity frames from the outputs defined above diff --git a/examples/src/device_queue_event.cpp b/examples/src/device_queue_event.cpp index e63dd2ca4..ec554a569 100644 --- a/examples/src/device_queue_event.cpp +++ b/examples/src/device_queue_event.cpp @@ -28,7 +28,7 @@ int main(int argc, char** argv) { camRgb->preview.link(xoutRgb->input); camMono->out.link(xoutMono->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Clear queue events diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp index 80ef00b9d..5309eb3c7 100644 --- a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -54,10 +54,8 @@ int main(int argc, char** argv) { ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); - // Start pipeline - device.startPipeline(); // Output queues will be used to get the encoded data from the output defined above auto outQ1 = device.getOutputQueue("ve1Out", 30, true); diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index d365e0899..89c0a84bc 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -35,12 +35,12 @@ int main(int argc, char** argv) { camRgb->preview.link(xout2->input); videnc->bitstream.link(xout->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto mjpegQueue = device.getOutputQueue("mjpeg", 8, false); auto previewQueue = device.getOutputQueue("preview", 8, false); - while(1) { + while(true) { auto t1 = steady_clock::now(); auto preview = previewQueue->get(); auto t2 = steady_clock::now(); diff --git a/examples/src/mono_camera_control.cpp b/examples/src/mono_camera_control.cpp index 567487fac..7adbff0b7 100644 --- a/examples/src/mono_camera_control.cpp +++ b/examples/src/mono_camera_control.cpp @@ -68,7 +68,7 @@ int main() { manipRight->out.link(manipOutRight->input); manipLeft->out.link(manipOutLeft->input); - // Connect to device + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the grayscale frames diff --git a/examples/src/mono_camera_video.cpp b/examples/src/mono_camera_video.cpp index 43a057a3f..5b7d00225 100644 --- a/examples/src/mono_camera_video.cpp +++ b/examples/src/mono_camera_video.cpp @@ -25,7 +25,7 @@ int main() { // Linking monoLeft->out.link(xoutVideo->input); - // Connect to device with 
above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto video = device.getOutputQueue("video"); diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 0f4e620d3..29350233f 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -72,7 +72,7 @@ int main(int argc, char** argv) { manip->out.link(xoutRight->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the grayscale / depth frames and nn data from the outputs defined above diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp index d45424828..db3c99485 100644 --- a/examples/src/mono_full_resolution_saver.cpp +++ b/examples/src/mono_full_resolution_saver.cpp @@ -26,7 +26,7 @@ int main(int argc, char** argv) // Linking monoRight->out.link(xoutRight->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Queue diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp index c15685340..121b0e576 100644 --- a/examples/src/mono_mobilenet.cpp +++ b/examples/src/mono_mobilenet.cpp @@ -56,7 +56,7 @@ int main(int argc, char** argv) { manip->out.link(manipOut->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the grayscale frames and nn data from the outputs defined above diff --git a/examples/src/mono_preview.cpp b/examples/src/mono_preview.cpp index 2923a7b49..3fd8c26e5 100644 --- a/examples/src/mono_preview.cpp +++ b/examples/src/mono_preview.cpp @@ -28,6 +28,7 @@ int main() { monoRight->out.link(xoutRight->input); monoLeft->out.link(xoutLeft->input); + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the grayscale frames from the outputs defined above diff --git a/examples/src/object_tracker.cpp b/examples/src/object_tracker.cpp index c34fbb599..20a777313 100644 --- a/examples/src/object_tracker.cpp +++ b/examples/src/object_tracker.cpp @@ -69,7 +69,7 @@ int main(int argc, char** argv) { detectionNetwork->out.link(objectTracker->inputDetections); objectTracker->out.link(trackerOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto preview = device.getOutputQueue("preview", 4, false); diff --git a/examples/src/opencv_support.cpp b/examples/src/opencv_support.cpp index fa6321325..9d34ba9f9 100644 --- a/examples/src/opencv_support.cpp +++ b/examples/src/opencv_support.cpp @@ -33,7 +33,7 @@ int main() { camRgb->video.link(xoutVideo->input); camRgb->preview.link(xoutPreview->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto video = device.getOutputQueue("video"); diff --git a/examples/src/rgb_camera_video.cpp b/examples/src/rgb_camera_video.cpp index 00146e0aa..00c67c2df 100644 --- a/examples/src/rgb_camera_video.cpp +++ b/examples/src/rgb_camera_video.cpp @@ -27,7 +27,7 @@ int main() { // Linking camRgb->video.link(xoutVideo->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device 
device(pipeline); auto video = device.getOutputQueue("video"); diff --git a/examples/src/rgb_depth_aligned_example.cpp b/examples/src/rgb_depth_aligned_example.cpp index 0474dd15e..20ba2ad13 100644 --- a/examples/src/rgb_depth_aligned_example.cpp +++ b/examples/src/rgb_depth_aligned_example.cpp @@ -54,7 +54,7 @@ int main() { right->out.link(stereo->right); stereo->disparity.link(depthOut->input); - // Connect to device + // Connect to device and start pipeline dai::Device device(pipeline); // Sets queues size and behavior diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp index bdfdedffc..c5787fc8c 100644 --- a/examples/src/rgb_encoding.cpp +++ b/examples/src/rgb_encoding.cpp @@ -33,7 +33,7 @@ int main(int argc, char** argv) { camRgb->video.link(videoEnc->input); videoEnc->bitstream.link(xout->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Output queue will be used to get the encoded data from the output defined above diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp index 67d0dfc24..4d25d8fa4 100644 --- a/examples/src/rgb_encoding_mobilenet.cpp +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -61,7 +61,7 @@ int main(int argc, char** argv) { videoEncoder->bitstream.link(videoOut->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp index 767a8a16d..59839bfaa 100644 --- a/examples/src/rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -70,7 +70,7 @@ int main(int argc, char** argv) { manip->out.link(manipOut->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index fa1f5f9a9..1d25af7ac 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -86,7 +86,7 @@ int main(int argc, char** argv) { manip->out.link(manipOut->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp index 2a40d19da..e0bb29519 100644 --- a/examples/src/rgb_full_resolution_saver.cpp +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -31,7 +31,7 @@ int main(int argc, char** argv) camRgb->video.link(videoEnc->input); videoEnc->bitstream.link(xoutJpeg->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp index bff419b26..afd1cba65 100644 --- a/examples/src/rgb_mobilenet.cpp +++ b/examples/src/rgb_mobilenet.cpp @@ -61,7 +61,7 @@ int main(int argc, char** argv) { camRgb->preview.link(nn->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the rgb frames and nn data from the outputs defined above 
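These hunks converge on one pattern: constructing dai::Device directly from the pipeline both connects to the device and starts the pipeline, so an explicit startPipeline() call is only needed when the device is created without a pipeline (as system_information.cpp does with device.startPipeline(pipeline)). A minimal sketch of the two equivalent forms, assuming a pipeline with a single ColorCamera linked to an XLinkOut stream named "rgb" (node and stream names here are illustrative, not taken from any one example):

    #include "depthai/depthai.hpp"

    int main() {
        // Create pipeline
        dai::Pipeline pipeline;

        // Define source and output
        auto camRgb = pipeline.create<dai::node::ColorCamera>();
        auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
        xoutRgb->setStreamName("rgb");

        // Linking
        camRgb->preview.link(xoutRgb->input);

        // Connect to device and start pipeline in one step
        dai::Device device(pipeline);

        // Equivalent two-step form, useful if the device must be queried before starting:
        //   dai::Device device;
        //   device.startPipeline(pipeline);

        // Output queue will be used to get the rgb frames from the output defined above
        auto qRgb = device.getOutputQueue("rgb", 4, false);
        auto inRgb = qRgb->get<dai::ImgFrame>();
        return 0;
    }
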
diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index 9165a2c91..ad07f05dd 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -57,7 +57,7 @@ int main(int argc, char** argv) { camRgb->preview.link(nn->input); nn->out.link(nnOut->input); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Queues diff --git a/examples/src/rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp index 8fdd8bc6c..f04a33f39 100644 --- a/examples/src/rgb_mono_encoding.cpp +++ b/examples/src/rgb_mono_encoding.cpp @@ -51,7 +51,7 @@ int main(int argc, char** argv) { ve2->bitstream.link(ve2Out->input); ve3->bitstream.link(ve3Out->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the encoded data from the output defined above diff --git a/examples/src/rgb_preview.cpp b/examples/src/rgb_preview.cpp index d655630fc..650810148 100644 --- a/examples/src/rgb_preview.cpp +++ b/examples/src/rgb_preview.cpp @@ -25,7 +25,7 @@ int main() { // Linking camRgb->preview.link(xoutRgb->input); - // Pipeline is defined, now we can connect to the device + // Connect to device and start pipeline dai::Device device(pipeline); // Output queue will be used to get the rgb frames from the output defined above diff --git a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index 316c9b996..a5ebbd317 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -65,7 +65,7 @@ int main() { spatialDataCalculator->out.link(xoutSpatialData->input); xinSpatialCalcConfig->out.link(spatialDataCalculator->inputConfig); - // CONNECT TO DEVICE + // Connect to device and start pipeline dai::Device device(pipeline); auto depthQueue = device.getOutputQueue("depth", 8, false); diff --git a/examples/src/spatial_mobilenet.cpp b/examples/src/spatial_mobilenet.cpp index 4c31f2e3a..68dc36e5d 100644 --- a/examples/src/spatial_mobilenet.cpp +++ b/examples/src/spatial_mobilenet.cpp @@ -83,7 +83,7 @@ int main(int argc, char** argv) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto previewQueue = device.getOutputQueue("rgb", 4, false); diff --git a/examples/src/spatial_mobilenet_mono.cpp b/examples/src/spatial_mobilenet_mono.cpp index 17f695f69..2d587dcf0 100644 --- a/examples/src/spatial_mobilenet_mono.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -85,7 +85,7 @@ int main(int argc, char** argv) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Output queues will be used to get the rgb frames and nn data from the outputs defined above diff --git a/examples/src/spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp index 0d02a0c34..6fd99e23b 100644 --- a/examples/src/spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -88,7 +88,7 @@ int main(int argc, char** argv) { stereo->depth.link(spatialDetectionNetwork->inputDepth); - // Connect and start the pipeline + // Connect to device and start pipeline 
dai::Device device(pipeline); auto preview = device.getOutputQueue("preview", 4, false); diff --git a/examples/src/spatial_tiny_yolo.cpp b/examples/src/spatial_tiny_yolo.cpp index f1ff4af3d..c1709eed1 100644 --- a/examples/src/spatial_tiny_yolo.cpp +++ b/examples/src/spatial_tiny_yolo.cpp @@ -98,7 +98,7 @@ int main(int argc, char** argv) { stereo->depth.link(spatialDetectionNetwork->inputDepth); spatialDetectionNetwork->passthroughDepth.link(xoutDepth->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto previewQueue = device.getOutputQueue("rgb", 4, false); diff --git a/examples/src/stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp index 3ffbd4f59..1522c5eef 100644 --- a/examples/src/stereo_depth_video.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -85,7 +85,7 @@ int main() { monoRight->out.link(xoutRight->input); } - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto leftQueue = device.getOutputQueue("left", 8, false); @@ -95,7 +95,7 @@ int main() { auto rectifLeftQueue = withDepth ? device.getOutputQueue("rectified_left", 8, false) : nullptr; auto rectifRightQueue = withDepth ? device.getOutputQueue("rectified_right", 8, false) : nullptr; - while(1) { + while(true) { auto left = leftQueue->get(); cv::imshow("left", cv::Mat(left->getHeight(), left->getWidth(), CV_8UC1, left->getData().data())); auto right = rightQueue->get(); diff --git a/examples/src/system_information.cpp b/examples/src/system_information.cpp index e3f5f0c3f..96f2b729a 100644 --- a/examples/src/system_information.cpp +++ b/examples/src/system_information.cpp @@ -45,7 +45,7 @@ int main() { dai::MemoryInfo cmx = device.getCmxMemoryUsage(); printf("Cmx used / total - %.2f / %.2f MiB\n", cmx.used / (1024.0f * 1024.0f), cmx.total / (1024.0f * 1024.0f)); - // Start pipeline + // Connect to device and start pipeline device.startPipeline(pipeline); // Output queue will be used to get the system info diff --git a/examples/src/tiny_yolo_v3_device_side_decoding.cpp b/examples/src/tiny_yolo_v3_device_side_decoding.cpp index 718009746..e5adde17f 100644 --- a/examples/src/tiny_yolo_v3_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v3_device_side_decoding.cpp @@ -73,7 +73,7 @@ int main(int argc, char** argv) { detectionNetwork->out.link(nnOut->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); auto qRgb = device.getOutputQueue("rgb", 4, false); diff --git a/examples/src/tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp index 98e22abb0..9e8e34f54 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v4_device_side_decoding.cpp @@ -78,7 +78,7 @@ int main(int argc, char** argv) { detectionNetwork->out.link(nnOut->input); - // Connect and start the pipeline + // Connect to device and start pipeline dai::Device device(pipeline); //Output queues will be used to get the rgb frames and nn data from the outputs defined above diff --git a/examples/src/video_mobilenet.cpp b/examples/src/video_mobilenet.cpp index 75664bc30..bb38f563d 100644 --- a/examples/src/video_mobilenet.cpp +++ b/examples/src/video_mobilenet.cpp @@ -46,11 +46,11 @@ int main(int argc, char** argv) { nn->setNumInferenceThreads(2); nn->input.setBlocking(false); - // Create outputs + // Linking xinFrame->out.link(nn->input); nn->out.link(nnOut->input); - // Connect to device with above 
created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); // Input queue will be used to send video frames to the device. diff --git a/examples/src/webcam_mobilenet_example.cpp b/examples/src/webcam_mobilenet_example.cpp index ef7eea9d3..dcce8fcf4 100644 --- a/examples/src/webcam_mobilenet_example.cpp +++ b/examples/src/webcam_mobilenet_example.cpp @@ -25,37 +25,38 @@ int main(int argc, char** argv) { using namespace std; - // CREATE PIPELINE + // Create pipeline dai::Pipeline pipeline; - auto xin = pipeline.create(); + // Define sources and outputs auto nn = pipeline.create(); + auto xin = pipeline.create(); auto xout = pipeline.create(); + xin->setStreamName("nn_in"); + xout->setStreamName("nn_out"); + // Properties nn->setBlobPath(nnPath); - xin->setStreamName("nn_in"); xin->setMaxDataSize(300 * 300 * 3); xin->setNumFrames(4); - xout->setStreamName("nn_out"); - - // Link plugins XLINK -> NN -> XLINK + // Linking xin->out.link(nn->input); nn->out.link(xout->input); // Open Webcam cv::VideoCapture webcam(camId); - // Connect to device with above created pipeline + // Connect to device and start pipeline dai::Device device(pipeline); cv::Mat frame; auto in = device.getInputQueue("nn_in"); auto detections = device.getOutputQueue("nn_out"); - while(1) { + while(true) { // data to send further auto tensor = std::make_shared(); From 5571934868ef38e168572a12b27ef84e6791342b Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Wed, 5 May 2021 22:52:25 +0300 Subject: [PATCH 26/56] Renamed examples --- examples/src/{mono_camera_video.cpp => mono_video.cpp} | 0 examples/src/{color_camera_control.cpp => rgb_camera_control.cpp} | 0 examples/src/{rgb_camera_video.cpp => rgb_video.cpp} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename examples/src/{mono_camera_video.cpp => mono_video.cpp} (100%) rename examples/src/{color_camera_control.cpp => rgb_camera_control.cpp} (100%) rename examples/src/{rgb_camera_video.cpp => rgb_video.cpp} (100%) diff --git a/examples/src/mono_camera_video.cpp b/examples/src/mono_video.cpp similarity index 100% rename from examples/src/mono_camera_video.cpp rename to examples/src/mono_video.cpp diff --git a/examples/src/color_camera_control.cpp b/examples/src/rgb_camera_control.cpp similarity index 100% rename from examples/src/color_camera_control.cpp rename to examples/src/rgb_camera_control.cpp diff --git a/examples/src/rgb_camera_video.cpp b/examples/src/rgb_video.cpp similarity index 100% rename from examples/src/rgb_camera_video.cpp rename to examples/src/rgb_video.cpp From 28f844052c7a90d519b62a2a5f2d05d4fe4ade48 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 6 May 2021 18:49:35 +0300 Subject: [PATCH 27/56] Fix formatting, fix a merge issue --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/pipeline/Pipeline.hpp | 1 + shared/depthai-shared | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 006c5a0a5..88d2756dd 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "1a37fa62de75de0dfe59358a868d33be040ea2b6") +set(DEPTHAI_DEVICE_SIDE_COMMIT "fddbe1b7868bdb67945b50ddc9dfaeb15bba2d60") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/pipeline/Pipeline.hpp 
b/include/depthai/pipeline/Pipeline.hpp index a26aa8982..31a937bea 100644 --- a/include/depthai/pipeline/Pipeline.hpp +++ b/include/depthai/pipeline/Pipeline.hpp @@ -219,6 +219,7 @@ class Pipeline { /// Get required OpenVINO version to run this pipeline OpenVINO::Version getOpenVINOVersion() const { return impl()->getPipelineOpenVINOVersion(); + } /// Set a camera IQ (Image Quality) tuning blob, used for all cameras void setCameraTuningBlobPath(const std::string& path) { diff --git a/shared/depthai-shared b/shared/depthai-shared index f9e5f9111..cd7a857f4 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit f9e5f9111b7183ec5114ac01529e6055493d92e5 +Subproject commit cd7a857f4bc4a4d0e5348a6cc6a536c0a3abc7b7 From 29327c9dbdabdc8e5e0ce3099a47534c830eae61 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Thu, 6 May 2021 23:00:56 +0300 Subject: [PATCH 28/56] Remove redundant in/out flags from ifstream/ofstream across codebase --- src/device/DeviceBootloader.cpp | 6 +++--- src/pipeline/Pipeline.cpp | 2 +- src/pipeline/node/NeuralNetwork.cpp | 2 +- src/pipeline/node/StereoDepth.cpp | 2 +- src/utility/Resources.cpp | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/device/DeviceBootloader.cpp b/src/device/DeviceBootloader.cpp index c07ebc56c..c578afd00 100644 --- a/src/device/DeviceBootloader.cpp +++ b/src/device/DeviceBootloader.cpp @@ -62,7 +62,7 @@ std::vector DeviceBootloader::createDepthaiApplicationPackage(Pipeline& // Prepare device firmware std::vector deviceFirmware; if(pathToCmd != "") { - std::ifstream fwStream(pathToCmd, std::ios::in | std::ios::binary); + std::ifstream fwStream(pathToCmd, std::ios::binary); if(!fwStream.is_open()) throw std::runtime_error("Cannot create application package, device firmware at path: " + pathToCmd + " doesn't exist"); deviceFirmware = std::vector(std::istreambuf_iterator(fwStream), {}); } else { @@ -274,7 +274,7 @@ std::tuple DeviceBootloader::flash(std::function void DeviceBootloader::saveDepthaiApplicationPackage(std::string path, Pipeline& pipeline, std::string pathToCmd) { auto dap = createDepthaiApplicationPackage(pipeline, pathToCmd); - std::ofstream outfile(path, std::ios::out | std::ios::binary); + std::ofstream outfile(path, std::ios::binary); outfile.write(reinterpret_cast(dap.data()), dap.size()); } @@ -320,7 +320,7 @@ std::tuple DeviceBootloader::flashDepthaiApplicationPackage(s std::tuple DeviceBootloader::flashBootloader(std::function progressCb, std::string path) { std::vector package; if(path != "") { - std::ifstream fwStream(path, std::ios::in | std::ios::binary); + std::ifstream fwStream(path, std::ios::binary); if(!fwStream.is_open()) throw std::runtime_error("Cannot flash bootloader, binary at path: " + path + " doesn't exist"); package = std::vector(std::istreambuf_iterator(fwStream), {}); } else { diff --git a/src/pipeline/Pipeline.cpp b/src/pipeline/Pipeline.cpp index abac380bc..853dd2fc1 100644 --- a/src/pipeline/Pipeline.cpp +++ b/src/pipeline/Pipeline.cpp @@ -256,7 +256,7 @@ OpenVINO::Version PipelineImpl::getPipelineOpenVINOVersion() const { void PipelineImpl::setCameraTuningBlobPath(const std::string& path) { std::string assetKey = "camTuning"; - std::ifstream blobStream(path, std::ios::in | std::ios::binary); + std::ifstream blobStream(path, std::ios::binary); if(!blobStream.is_open()) { throw std::runtime_error("Pipeline | Couldn't open camera tuning blob at path: " + path); } diff --git a/src/pipeline/node/NeuralNetwork.cpp b/src/pipeline/node/NeuralNetwork.cpp 
index 456f144e5..7e3f56681 100644 --- a/src/pipeline/node/NeuralNetwork.cpp +++ b/src/pipeline/node/NeuralNetwork.cpp @@ -44,7 +44,7 @@ NeuralNetwork::BlobAssetInfo NeuralNetwork::loadBlob(const std::string& path) { // Load blob in blobPath into asset // And mark in properties where to look for it - std::ifstream blobStream(blobPath, std::ios::in | std::ios::binary); + std::ifstream blobStream(blobPath, std::ios::binary); if(!blobStream.is_open()) throw std::runtime_error("NeuralNetwork node | Blob at path: " + blobPath + " doesn't exist"); // Create an asset (alignment 64) diff --git a/src/pipeline/node/StereoDepth.cpp b/src/pipeline/node/StereoDepth.cpp index 38aa97101..57219d18a 100644 --- a/src/pipeline/node/StereoDepth.cpp +++ b/src/pipeline/node/StereoDepth.cpp @@ -46,7 +46,7 @@ void StereoDepth::loadCalibrationData(const std::vector& data) { void StereoDepth::loadCalibrationFile(const std::string& path) { std::vector data; if(!path.empty()) { - std::ifstream calib(path, std::ios::in | std::ios::binary); + std::ifstream calib(path, std::ios::binary); if(!calib.is_open()) { throw std::runtime_error("StereoDepth node | Unable to open calibration file: " + path); } diff --git a/src/utility/Resources.cpp b/src/utility/Resources.cpp index 140300d20..62d1227b8 100644 --- a/src/utility/Resources.cpp +++ b/src/utility/Resources.cpp @@ -73,7 +73,7 @@ std::vector Resources::getDeviceBinary(OpenVINO::Version version, auto fwBinaryPath = spdlog::details::os::getenv("DEPTHAI_DEVICE_BINARY"); if(!fwBinaryPath.empty()) { // Load binary file at path - std::ifstream stream(fwBinaryPath, std::ios::in | std::ios::binary); + std::ifstream stream(fwBinaryPath, std::ios::binary); if(!stream.is_open()) { // Throw an error // TODO(themarpe) - Unify exceptions into meaningful groups From 5fee02c669f05f5d83ed27332eda8d05d74bba7c Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Thu, 6 May 2021 23:16:09 +0300 Subject: [PATCH 29/56] Rename --- examples/CMakeLists.txt | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index cdf7ad0b9..6885fedcf 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -106,7 +106,7 @@ hunter_private_data( ## message(STATUS "Location of test1.data: ${test1_data}") -# Color camera preview output example +# RGB camera preview output example dai_add_example(rgb_preview src/rgb_preview.cpp) # Mono camera preview output example @@ -142,8 +142,8 @@ target_compile_definitions(rgb_encoding_mono_mobilenet_depth PRIVATE BLOB_PATH=" dai_add_example(encoding_max_limit src/encoding_max_limit.cpp) -# Color Camera config example -dai_add_example(color_camera_control src/color_camera_control.cpp) +# RGB Camera config example +dai_add_example(rgb_camera_control src/rgb_camera_control.cpp) dai_add_example(mono_camera_control src/mono_camera_control.cpp) @@ -164,6 +164,7 @@ target_compile_definitions(rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet # Image Manip node examples dai_add_example(image_manip src/image_manip_example.cpp) +# Imagae manip node exapmle with warping dai_add_example(image_manip_warp src/image_manip_warp.cpp) # Device side decoding example for tiny-yolo-v3 @@ -198,11 +199,11 @@ target_compile_definitions(spatial_tiny_yolo_v4 PRIVATE BLOB_PATH="${tiny_yolo_v dai_add_example(spatial_location_calculator src/spatial_location_calculator.cpp) -# Color camera video output example -dai_add_example(rgb_camera_video src/rgb_camera_video.cpp) +# RGB camera video output example 
+dai_add_example(rgb_video src/rgb_video.cpp) # Mono camera video output example -dai_add_example(mono_camera_video src/mono_camera_video.cpp) +dai_add_example(mono_video src/mono_video.cpp) # Object tracker example on mobilenet SSD output dai_add_example(object_tracker src/object_tracker.cpp) From fa77872e3aa2a79a010b2c3a5a18022335db84d8 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Sat, 8 May 2021 01:11:52 +0300 Subject: [PATCH 30/56] Remove duplicated example --- examples/CMakeLists.txt | 3 --- examples/src/mono_video.cpp | 46 ------------------------------------- 2 files changed, 49 deletions(-) delete mode 100644 examples/src/mono_video.cpp diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 6885fedcf..ea0c7f815 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -202,9 +202,6 @@ dai_add_example(spatial_location_calculator src/spatial_location_calculator.cpp) # RGB camera video output example dai_add_example(rgb_video src/rgb_video.cpp) -# Mono camera video output example -dai_add_example(mono_video src/mono_video.cpp) - # Object tracker example on mobilenet SSD output dai_add_example(object_tracker src/object_tracker.cpp) target_compile_definitions(object_tracker PRIVATE BLOB_PATH="${mobilenet_blob}") diff --git a/examples/src/mono_video.cpp b/examples/src/mono_video.cpp deleted file mode 100644 index 5b7d00225..000000000 --- a/examples/src/mono_video.cpp +++ /dev/null @@ -1,46 +0,0 @@ -#include -#include - -// Inludes common necessary includes for development using depthai library -#include "depthai/depthai.hpp" - -int main() { - using namespace std; - - // Create pipeline - dai::Pipeline pipeline; - - // Define source and output - auto monoLeft = pipeline.create(); - auto xoutVideo = pipeline.create(); - - xoutVideo->setStreamName("video"); - - // Properties - monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); - - xoutVideo->input.setBlocking(false); - xoutVideo->input.setQueueSize(1); - - // Linking - monoLeft->out.link(xoutVideo->input); - - // Connect to device and start pipeline - dai::Device device(pipeline); - - auto video = device.getOutputQueue("video"); - - while(true) { - auto videoIn = video->get(); - - // Get BGR frame from NV12 encoded video frame to show with opencv - // Visualizing the frame on slower hosts might have overhead - cv::imshow("video", videoIn->getCvFrame()); - - int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') { - return 0; - } - } - return 0; -} From 077dba1eed378bd8270054834a24832af2c05ff8 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 10 May 2021 16:12:45 +0300 Subject: [PATCH 31/56] Fixing errors --- examples/src/rgb_mobilenet_4k.cpp | 2 +- examples/src/spatial_location_calculator.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index ad07f05dd..894b113b3 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -66,7 +66,7 @@ int main(int argc, char** argv) { auto qDet = device.getOutputQueue("nn", 4, false); // Add bounding boxes and text to the frame and show it to the user - auto displayFrame = [](std::string name, auto frame, auto detections) { + auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); // nn data, being the bounding box locations, are in <0..1> range - they need to be normalized with frame width/height for(auto& detection : detections) { diff --git 
a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index a5ebbd317..99a9e5d2c 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -134,7 +134,7 @@ int main() { newConfig = true; } break; - case 'device': + case 'd': if(bottomRight.x + stepSize <= 1) { topLeft.x += stepSize; bottomRight.x += stepSize; From 9aa71f71cc7d40ae40746098bf98aa15b2511162 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 10 May 2021 16:21:09 +0300 Subject: [PATCH 32/56] Renamed rgb_depth_aligned_example to rgb_depth_aligned --- examples/CMakeLists.txt | 2 +- .../{rgb_depth_aligned_example.cpp => rgb_depth_aligned.cpp} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename examples/src/{rgb_depth_aligned_example.cpp => rgb_depth_aligned.cpp} (100%) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index ea0c7f815..2a184ae7c 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -221,7 +221,7 @@ target_compile_definitions(webcam_mobilenet PRIVATE BLOB_PATH="${mobilenet_8shav dai_add_example(mjpeg_encoding src/mjpeg_encoding_example.cpp) # RGB-depth alignment example -dai_add_example(rgb_depth_aligned src/rgb_depth_aligned_example.cpp) +dai_add_example(rgb_depth_aligned src/rgb_depth_aligned.cpp) # Sync example between NN and camera frames dai_add_example(camera_mobilenet_sync src/camera_mobilenet_sync_example.cpp) diff --git a/examples/src/rgb_depth_aligned_example.cpp b/examples/src/rgb_depth_aligned.cpp similarity index 100% rename from examples/src/rgb_depth_aligned_example.cpp rename to examples/src/rgb_depth_aligned.cpp From d0212dff648b7dc6cc64ad265d7592be32a68a21 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 10 May 2021 17:33:42 +0300 Subject: [PATCH 33/56] Fix createDirectory for windows --- examples/src/depth_crop_control.cpp | 14 ++++++------ examples/src/depth_preview.cpp | 4 ++-- examples/src/device_queue_event.cpp | 1 + examples/src/encoding_max_limit.cpp | 2 +- examples/src/mono_camera_control.cpp | 12 +++++----- examples/src/mono_depth_mobilenetssd.cpp | 7 +++--- examples/src/mono_full_resolution_saver.cpp | 17 +++++++------- examples/src/mono_mobilenet.cpp | 3 +-- examples/src/opencv_support.cpp | 3 +-- examples/src/rgb_camera_control.cpp | 1 + examples/src/rgb_encoding.cpp | 2 +- examples/src/rgb_encoding_mobilenet.cpp | 2 +- examples/src/rgb_encoding_mono_mobilenet.cpp | 2 +- .../src/rgb_encoding_mono_mobilenet_depth.cpp | 6 ++--- examples/src/rgb_full_resolution_saver.cpp | 19 ++++++++-------- examples/src/rgb_mobilenet.cpp | 9 ++++---- examples/src/rgb_mobilenet_4k.cpp | 5 ++--- examples/src/rgb_mono_encoding.cpp | 2 +- examples/src/spatial_mobilenet_mono.cpp | 4 ++-- examples/src/spatial_object_tracker.cpp | 1 - .../src/tiny_yolo_v4_device_side_decoding.cpp | 8 +++---- examples/src/utility.cpp | 22 +++++++++++++++++++ examples/src/utility.hpp | 1 + examples/src/video_mobilenet.cpp | 3 +-- 24 files changed, 84 insertions(+), 66 deletions(-) diff --git a/examples/src/depth_crop_control.cpp b/examples/src/depth_crop_control.cpp index 2a24ab441..8ef97a294 100644 --- a/examples/src/depth_crop_control.cpp +++ b/examples/src/depth_crop_control.cpp @@ -59,7 +59,7 @@ int main() { while(true) { auto inDepth = q->get(); - cv::Mat depthFrame = inDepth ->getFrame(); + cv::Mat depthFrame = inDepth->getFrame(); // Frame is transformed, the color map will be applied to highlight the depth info cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, 
cv::NORM_INF, CV_8UC1); @@ -74,33 +74,33 @@ int main() { if(key == 'q') { break; } else if(key == 'w') { - if (topLeft.y - stepSize >= 0) { + if(topLeft.y - stepSize >= 0) { topLeft.y -= stepSize; bottomRight.y -= stepSize; sendCamConfig = true; } } else if(key == 'a') { - if (topLeft.x - stepSize >= 0) { + if(topLeft.x - stepSize >= 0) { topLeft.x -= stepSize; bottomRight.x -= stepSize; sendCamConfig = true; } } else if(key == 's') { - if (bottomRight.y + stepSize <= 1) { + if(bottomRight.y + stepSize <= 1) { topLeft.y += stepSize; bottomRight.y += stepSize; sendCamConfig = true; } } else if(key == 'd') { - if (bottomRight.x + stepSize <= 1) { + if(bottomRight.x + stepSize <= 1) { topLeft.x += stepSize; - bottomRight.x +=stepSize; + bottomRight.x += stepSize; sendCamConfig = true; } } // Send new config to camera - if (sendCamConfig) { + if(sendCamConfig) { dai::ImageManipConfig cfg; cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); configQueue->send(cfg); diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp index b71f471f2..b484ae2fe 100644 --- a/examples/src/depth_preview.cpp +++ b/examples/src/depth_preview.cpp @@ -37,10 +37,10 @@ int main() { // Normal disparity values range from 0..95, will be used for normalization int max_disparity = 95; - if (extended_disparity) max_disparity *= 2; // Double the range + if(extended_disparity) max_disparity *= 2; // Double the range depth->setExtendedDisparity(extended_disparity); - if (subpixel) max_disparity *= 32; // 5 fractional bits, x32 + if(subpixel) max_disparity *= 32; // 5 fractional bits, x32 depth->setSubpixel(subpixel); // When we get disparity to the host, we will multiply all values with the multiplier diff --git a/examples/src/device_queue_event.cpp b/examples/src/device_queue_event.cpp index ec554a569..5bab6be0e 100644 --- a/examples/src/device_queue_event.cpp +++ b/examples/src/device_queue_event.cpp @@ -1,5 +1,6 @@ #include #include + #include "utility.hpp" // Inludes common necessary includes for development using depthai library diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp index 5309eb3c7..d380441a5 100644 --- a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -5,7 +5,7 @@ // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" - // Keyboard interrupt (Ctrl + C) detected +// Keyboard interrupt (Ctrl + C) detected static std::atomic alive{true}; static void sigintHandler(int signum) { alive = false; diff --git a/examples/src/mono_camera_control.cpp b/examples/src/mono_camera_control.cpp index 7adbff0b7..832c7289b 100644 --- a/examples/src/mono_camera_control.cpp +++ b/examples/src/mono_camera_control.cpp @@ -113,33 +113,33 @@ int main() { ctrl.setManualExposure(exp_time, sens_iso); controlQueue->send(ctrl); } else if(key == 'w') { - if (topLeft.y - stepSize >= 0) { + if(topLeft.y - stepSize >= 0) { topLeft.y -= stepSize; bottomRight.y -= stepSize; sendCamConfig = true; } } else if(key == 'a') { - if (topLeft.x - stepSize >= 0) { + if(topLeft.x - stepSize >= 0) { topLeft.x -= stepSize; bottomRight.x -= stepSize; sendCamConfig = true; } } else if(key == 's') { - if (bottomRight.y + stepSize <= 1) { + if(bottomRight.y + stepSize <= 1) { topLeft.y += stepSize; bottomRight.y += stepSize; sendCamConfig = true; } } else if(key == 'd') { - if (bottomRight.x + stepSize <= 1) { + if(bottomRight.x + stepSize <= 1) { topLeft.x += stepSize; - bottomRight.x +=stepSize; + 
bottomRight.x += stepSize; sendCamConfig = true; } } // Send new config to camera - if (sendCamConfig) { + if(sendCamConfig) { dai::ImageManipConfig cfg; cfg.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y); configQueue->send(cfg); diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 29350233f..2863cb2a4 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -115,7 +115,7 @@ int main(int argc, char** argv) { cv::Mat rightFrame = inRight->getCvFrame(); cv::Mat disparityFrame = inDisparity->getCvFrame(); - if (flipRectified){ + if(flipRectified) { cv::flip(rightFrame, rightFrame, 1); for(auto& detection : detections) { @@ -125,14 +125,13 @@ int main(int argc, char** argv) { } } disparityFrame.convertTo(disparityFrame, CV_8UC1, disparity_multiplier); - //Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); show("disparity", disparityFrame, detections); show("rectified right", rightFrame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp index db3c99485..0f69a984a 100644 --- a/examples/src/mono_full_resolution_saver.cpp +++ b/examples/src/mono_full_resolution_saver.cpp @@ -1,13 +1,14 @@ +#include #include #include -#include -#include + +#include "errno.h" // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" +#include "utility.hpp" -int main(int argc, char** argv) -{ +int main(int argc, char** argv) { using namespace std; using namespace std::chrono; @@ -32,7 +33,8 @@ int main(int argc, char** argv) // Queue auto qRight = device.getOutputQueue("right", 4, false); - mkdir("07_data", 0777); + std::string dirName = "mono_data"; + createDirectory(dirName); while(true) { auto inRight = qRight->get(); @@ -41,13 +43,12 @@ int main(int argc, char** argv) uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); std::stringstream videoStr; - videoStr << "07_data/" << time << ".png"; + videoStr << dirName << "/" << time << ".png"; // After showing the frame, it's being stored inside a target directory as a PNG image cv::imwrite(videoStr.str(), inRight->getCvFrame()); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp index 121b0e576..78a2bd767 100644 --- a/examples/src/mono_mobilenet.cpp +++ b/examples/src/mono_mobilenet.cpp @@ -97,8 +97,7 @@ int main(int argc, char** argv) { displayFrame("right", frame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/opencv_support.cpp b/examples/src/opencv_support.cpp index 9d34ba9f9..53c7c338d 100644 --- a/examples/src/opencv_support.cpp +++ b/examples/src/opencv_support.cpp @@ -50,8 +50,7 @@ int main() { cv::imshow("preview", previewFrame->getFrame()); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git 
a/examples/src/rgb_camera_control.cpp b/examples/src/rgb_camera_control.cpp index 0d389f47c..cfa24f7f6 100644 --- a/examples/src/rgb_camera_control.cpp +++ b/examples/src/rgb_camera_control.cpp @@ -12,6 +12,7 @@ */ #include #include + #include "utility.hpp" // Inludes common necessary includes for development using depthai library diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp index c5787fc8c..a7446a97c 100644 --- a/examples/src/rgb_encoding.cpp +++ b/examples/src/rgb_encoding.cpp @@ -5,7 +5,7 @@ // Includes common necessary includes for development using depthai library #include "depthai/depthai.hpp" - // Keyboard interrupt (Ctrl + C) detected +// Keyboard interrupt (Ctrl + C) detected static std::atomic alive{true}; static void sigintHandler(int signum) { alive = false; diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp index 4d25d8fa4..40f155bf7 100644 --- a/examples/src/rgb_encoding_mobilenet.cpp +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -109,7 +109,7 @@ int main(int argc, char** argv) { displayFrame("rgb", frame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q'){ + if(key == 'q' || key == 'Q') { std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; std::cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << std::endl; return 0; diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp index 59839bfaa..3c3a6dd2f 100644 --- a/examples/src/rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -138,7 +138,7 @@ int main(int argc, char** argv) { cv::imshow("manip", frameManip); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q'){ + if(key == 'q' || key == 'Q') { std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; return 0; diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index 1d25af7ac..a74118d59 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -63,7 +63,7 @@ int main(int argc, char** argv) { // Note: the rectified streams are horizontally mirrored by default depth->setConfidenceThreshold(255); depth->setRectifyMirrorFrame(false); - depth->setRectifyEdgeFillColor(0); // Black, to better see the cutout + depth->setRectifyEdgeFillColor(0); // Black, to better see the cutout nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); @@ -118,7 +118,7 @@ int main(int argc, char** argv) { auto out1 = qRgbEnc->get(); videoFile.write((char*)out1->getData().data(), out1->getData().size()); - if (flipRectified){ + if(flipRectified) { cv::flip(frameDisparity, frameDisparity, 1); } frameDisparity.convertTo(frameDisparity, CV_8UC1, disparity_multiplier); @@ -187,7 +187,7 @@ int main(int argc, char** argv) { cv::imshow("manip", frameManip); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q'){ + if(key == 'q' || key == 'Q') { std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; return 0; diff --git a/examples/src/rgb_full_resolution_saver.cpp 
b/examples/src/rgb_full_resolution_saver.cpp index e0bb29519..84308d65f 100644 --- a/examples/src/rgb_full_resolution_saver.cpp +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -1,13 +1,12 @@ +#include #include #include -#include -#include // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" +#include "utility.hpp" -int main(int argc, char** argv) -{ +int main(int argc, char** argv) { using namespace std::chrono; dai::Pipeline pipeline; @@ -38,26 +37,26 @@ int main(int argc, char** argv) auto qRgb = device.getOutputQueue("rgb", 30, false); auto qJpeg = device.getOutputQueue("jpeg", 30, true); - mkdir("06_data", 0777); + std::string dirName = "rgb_data"; + createDirectory(dirName); while(true) { auto inRgb = qRgb->tryGet(); - if (inRgb != NULL){ + if(inRgb != NULL) { cv::imshow("rgb", inRgb->getCvFrame()); } auto encFrames = qJpeg->tryGetAll(); - for(const auto& encFrame : encFrames){ + for(const auto& encFrame : encFrames) { uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); std::stringstream videoStr; - videoStr << "06_data/" << time << ".jpeg"; + videoStr << dirName << "/" << time << ".jpeg"; auto videoFile = std::ofstream(videoStr.str(), std::ios::binary); videoFile.write((char*)encFrame->getData().data(), encFrame->getData().size()); } int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp index afd1cba65..15e745301 100644 --- a/examples/src/rgb_mobilenet.cpp +++ b/examples/src/rgb_mobilenet.cpp @@ -42,7 +42,7 @@ int main(int argc, char** argv) { nnOut->setStreamName("nn"); // Properties - camRgb->setPreviewSize(300, 300); // NN input + camRgb->setPreviewSize(300, 300); // NN input camRgb->setInterleaved(false); camRgb->setFps(40); // Define a neural network that will make predictions based on the source frames @@ -52,7 +52,7 @@ int main(int argc, char** argv) { nn->input.setBlocking(false); // Linking - if (syncNN) { + if(syncNN) { nn->passthrough.link(xoutRgb->input); } else { camRgb->preview.link(xoutRgb->input); @@ -113,14 +113,13 @@ int main(int argc, char** argv) { } std::stringstream fpsStr; - fpsStr << "NN fps: " <getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); displayFrame("video", frame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index 894b113b3..bd30bcd43 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -41,7 +41,7 @@ int main(int argc, char** argv) { nnOut->setStreamName("nn"); // Properties - camRgb->setPreviewSize(300, 300); // NN input + camRgb->setPreviewSize(300, 300); // NN input camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); camRgb->setInterleaved(false); camRgb->setPreviewKeepAspectRatio(false); @@ -106,8 +106,7 @@ int main(int argc, char** argv) { displayFrame("preview", previewFrame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } diff --git a/examples/src/rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp index f04a33f39..904a662aa 100644 --- a/examples/src/rgb_mono_encoding.cpp +++ b/examples/src/rgb_mono_encoding.cpp @@ -5,7 +5,7 @@ // Inludes common 
necessary includes for development using depthai library #include "depthai/depthai.hpp" - // Keyboard interrupt (Ctrl + C) detected +// Keyboard interrupt (Ctrl + C) detected static std::atomic alive{true}; static void sigintHandler(int signum) { alive = false; diff --git a/examples/src/spatial_mobilenet_mono.cpp b/examples/src/spatial_mobilenet_mono.cpp index 2d587dcf0..aea0809ff 100644 --- a/examples/src/spatial_mobilenet_mono.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -60,7 +60,7 @@ int main(int argc, char** argv) { // StereoDepth stereo->setConfidenceThreshold(255); - //Define a neural network that will make predictions based on the source frames + // Define a neural network that will make predictions based on the source frames spatialDetectionNetwork->setConfidenceThreshold(0.5f); spatialDetectionNetwork->setBlobPath(nnPath); spatialDetectionNetwork->input.setBlocking(false); @@ -183,7 +183,7 @@ int main(int argc, char** argv) { cv::imshow("rectified right", rectifiedRight); int key = cv::waitKey(1); - if(key == 'q' || key =='Q') { + if(key == 'q' || key == 'Q') { return 0; } } diff --git a/examples/src/spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp index 6fd99e23b..fbac703c2 100644 --- a/examples/src/spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -87,7 +87,6 @@ int main(int argc, char** argv) { stereo->depth.link(spatialDetectionNetwork->inputDepth); - // Connect to device and start pipeline dai::Device device(pipeline); diff --git a/examples/src/tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp index 9e8e34f54..942fded16 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v4_device_side_decoding.cpp @@ -61,8 +61,8 @@ int main(int argc, char** argv) { detectionNetwork->setConfidenceThreshold(0.5f); detectionNetwork->setNumClasses(80); detectionNetwork->setCoordinateSize(4); - detectionNetwork->setAnchors({10,14, 23,27, 37,58, 81,82, 135,169, 344,319}); - detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}},{"side13", {3, 4, 5}}}); + detectionNetwork->setAnchors({10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319}); + detectionNetwork->setAnchorMasks({{"side26", {1, 2, 3}}, {"side13", {3, 4, 5}}}); detectionNetwork->setIouThreshold(0.5f); detectionNetwork->setBlobPath(nnPath); detectionNetwork->setNumInferenceThreads(2); @@ -81,7 +81,7 @@ int main(int argc, char** argv) { // Connect to device and start pipeline dai::Device device(pipeline); - //Output queues will be used to get the rgb frames and nn data from the outputs defined above + // Output queues will be used to get the rgb frames and nn data from the outputs defined above auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = device.getOutputQueue("detections", 4, false); @@ -130,7 +130,7 @@ int main(int argc, char** argv) { } std::stringstream fpsStr; - fpsStr << "NN fps: "<< std::fixed << std::setprecision(2) << fps; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); displayFrame("video", frame, detections); diff --git a/examples/src/utility.cpp b/examples/src/utility.cpp index cc7ff6c79..16fd91d74 100644 --- a/examples/src/utility.cpp +++ b/examples/src/utility.cpp @@ -3,6 +3,28 @@ // libraries #include "fp16/fp16.h" +#include "errno.h" + +#if (defined(_WIN32) || defined(_WIN64)) +#include +#include +#else +#include +#endif + +int 
createDirectory(std::string directory) +{ + int ret = 0; +#if (defined(_WIN32) || defined(_WIN64)) + ret = _mkdir(directory.c_str()); +#else + ret = mkdir(directory.c_str(), 0777); +#endif + if (ret == EEXIST) ret = 0; + return ret; +} + + cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, int bpp){ cv::Mat frame; diff --git a/examples/src/utility.hpp b/examples/src/utility.hpp index 9f0d75807..10f6facde 100644 --- a/examples/src/utility.hpp +++ b/examples/src/utility.hpp @@ -5,3 +5,4 @@ cv::Mat toMat(const std::vector& data, int w, int h , int numPlanes, int bpp); void toPlanar(cv::Mat& bgr, std::vector& data); cv::Mat resizeKeepAspectRatio(const cv::Mat &input, const cv::Size &dstSize, const cv::Scalar &bgcolor); +int createDirectory(std::string directory); diff --git a/examples/src/video_mobilenet.cpp b/examples/src/video_mobilenet.cpp index bb38f563d..49083a557 100644 --- a/examples/src/video_mobilenet.cpp +++ b/examples/src/video_mobilenet.cpp @@ -107,8 +107,7 @@ int main(int argc, char** argv) { displayFrame("inFrame", frame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') - return 0; + if(key == 'q' || key == 'Q') return 0; } return 0; } From d6ed86753692378536257b04ff76bbd9f4eb3b2a Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 10 May 2021 23:18:59 +0300 Subject: [PATCH 34/56] Comments --- examples/src/depth_crop_control.cpp | 2 +- examples/src/depth_preview.cpp | 3 +++ examples/src/encoding_max_limit.cpp | 1 + examples/src/image_manip_example.cpp | 13 ++++--------- examples/src/image_manip_warp.cpp | 1 + examples/src/mjpeg_encoding_example.cpp | 5 +++-- examples/src/mono_camera_control.cpp | 2 +- examples/src/mono_full_resolution_saver.cpp | 3 ++- examples/src/mono_preview.cpp | 2 +- examples/src/rgb_camera_control.cpp | 1 + examples/src/rgb_depth_aligned.cpp | 1 + examples/src/rgb_encoding.cpp | 5 +++-- examples/src/rgb_full_resolution_saver.cpp | 3 ++- examples/src/rgb_mono_encoding.cpp | 1 + examples/src/rgb_preview.cpp | 1 + examples/src/spatial_location_calculator.cpp | 2 +- examples/src/stereo_depth_video.cpp | 7 +++++-- examples/src/system_information.cpp | 10 ++-------- 18 files changed, 34 insertions(+), 29 deletions(-) diff --git a/examples/src/depth_crop_control.cpp b/examples/src/depth_crop_control.cpp index 8ef97a294..bd0ef2f0f 100644 --- a/examples/src/depth_crop_control.cpp +++ b/examples/src/depth_crop_control.cpp @@ -14,7 +14,7 @@ static constexpr float stepSize = 0.02; static std::atomic sendCamConfig{false}; int main() { - // Start defining a pipeline + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp index b484ae2fe..c78b2819b 100644 --- a/examples/src/depth_preview.cpp +++ b/examples/src/depth_preview.cpp @@ -14,8 +14,10 @@ static std::atomic lr_check{false}; int main() { using namespace std; + // Create pipeline dai::Pipeline pipeline; + // Define sources and outputs auto monoLeft = pipeline.create(); auto monoRight = pipeline.create(); auto depth = pipeline.create(); @@ -23,6 +25,7 @@ int main() { xout->setStreamName("disparity"); + // Properties monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp index d380441a5..54794e36a 100644 --- 
a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -16,6 +16,7 @@ int main(int argc, char** argv) { using namespace std::chrono; std::signal(SIGINT, &sigintHandler); + // Create pipeline dai::Pipeline pipeline; // Define a source - color camera and 2 mono camera diff --git a/examples/src/image_manip_example.cpp b/examples/src/image_manip_example.cpp index 866077ce9..610d1c092 100644 --- a/examples/src/image_manip_example.cpp +++ b/examples/src/image_manip_example.cpp @@ -7,8 +7,10 @@ #include "depthai/depthai.hpp" int main() { + // Create pipeline dai::Pipeline pipeline; + // Define sources and outputs auto camRgb = pipeline.create(); auto imageManip = pipeline.create(); auto imageManip2 = pipeline.create(); @@ -22,6 +24,7 @@ int main() { manipOut2->setStreamName("manip2"); manip2In->setStreamName("manip2In"); + // Properties camRgb->setPreviewSize(304, 304); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); camRgb->setInterleaved(false); @@ -35,20 +38,12 @@ int main() { imageManip2->initialConfig.setCropRect(0.1, 0.1, 0.3, 0.3); imageManip2->setWaitForConfigInput(true); - // Link nodes CAM -> XLINK + // Linking camRgb->preview.link(camOut->input); - - // Link nodes CAM -> imageManip -> XLINK camRgb->preview.link(imageManip->inputImage); imageManip->out.link(manipOut->input); - - // ImageManip -> ImageManip 2 imageManip->out.link(imageManip2->inputImage); - - // ImageManip2 -> XLinkOut imageManip2->out.link(manipOut2->input); - - // Host config -> image manip 2 manip2In->out.link(imageManip2->inputConfig); // Connect to device and start pipeline diff --git a/examples/src/image_manip_warp.cpp b/examples/src/image_manip_warp.cpp index 1271380ec..ba91e8443 100644 --- a/examples/src/image_manip_warp.cpp +++ b/examples/src/image_manip_warp.cpp @@ -56,6 +56,7 @@ std::vector warpList = { }; int main() { + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index 89c0a84bc..8c7b018c6 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -10,14 +10,15 @@ int main(int argc, char** argv) { using namespace std; using namespace std::chrono; + // Create pipeline dai::Pipeline pipeline; + // Define sources and outputs auto camRgb = pipeline.create(); auto xout = pipeline.create(); auto xout2 = pipeline.create(); auto videnc = pipeline.create(); - // XLinkOut xout->setStreamName("mjpeg"); xout2->setStreamName("preview"); @@ -30,7 +31,7 @@ int main(int argc, char** argv) { // VideoEncoder videnc->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::MJPEG); - // Link plugins CAM -> XLINK + // Linking camRgb->video.link(videnc->input); camRgb->preview.link(xout2->input); videnc->bitstream.link(xout->input); diff --git a/examples/src/mono_camera_control.cpp b/examples/src/mono_camera_control.cpp index 832c7289b..78a51216c 100644 --- a/examples/src/mono_camera_control.cpp +++ b/examples/src/mono_camera_control.cpp @@ -27,7 +27,7 @@ static int clamp(int num, int v0, int v1) { static std::atomic sendCamConfig{false}; int main() { - // Start defining a pipeline + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp index 0f69a984a..e02ef7163 100644 --- a/examples/src/mono_full_resolution_saver.cpp +++ 
b/examples/src/mono_full_resolution_saver.cpp @@ -12,9 +12,10 @@ int main(int argc, char** argv) { using namespace std; using namespace std::chrono; + // Create pipeline dai::Pipeline pipeline; - // Define sources and outputs + // Define source and output auto monoRight = pipeline.create(); auto xoutRight = pipeline.create(); diff --git a/examples/src/mono_preview.cpp b/examples/src/mono_preview.cpp index 3fd8c26e5..4283def23 100644 --- a/examples/src/mono_preview.cpp +++ b/examples/src/mono_preview.cpp @@ -6,7 +6,7 @@ int main() { using namespace std; - // Start defining a pipeline + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/rgb_camera_control.cpp b/examples/src/rgb_camera_control.cpp index cfa24f7f6..84fa270df 100644 --- a/examples/src/rgb_camera_control.cpp +++ b/examples/src/rgb_camera_control.cpp @@ -31,6 +31,7 @@ static int clamp(int num, int v0, int v1) { } int main() { + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/rgb_depth_aligned.cpp b/examples/src/rgb_depth_aligned.cpp index 20ba2ad13..d743d8f54 100644 --- a/examples/src/rgb_depth_aligned.cpp +++ b/examples/src/rgb_depth_aligned.cpp @@ -13,6 +13,7 @@ static constexpr bool downscaleColor = true; int main() { using namespace std; + // Create pipeline dai::Pipeline pipeline; std::vector queueNames; diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp index a7446a97c..7400ac02e 100644 --- a/examples/src/rgb_encoding.cpp +++ b/examples/src/rgb_encoding.cpp @@ -12,9 +12,9 @@ static void sigintHandler(int signum) { } int main(int argc, char** argv) { - // Detect signal interrupts (Ctrl + C) std::signal(SIGINT, &sigintHandler); + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs @@ -23,13 +23,14 @@ int main(int argc, char** argv) { auto xout = pipeline.create(); xout->setStreamName("h265"); + // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); - // Create outputs + // Linking camRgb->video.link(videoEnc->input); videoEnc->bitstream.link(xout->input); diff --git a/examples/src/rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp index 84308d65f..18c4a1f86 100644 --- a/examples/src/rgb_full_resolution_saver.cpp +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -9,13 +9,14 @@ int main(int argc, char** argv) { using namespace std::chrono; + // Create pipeline dai::Pipeline pipeline; // Define source and outputs auto camRgb = pipeline.create(); + auto videoEnc = pipeline.create(); auto xoutJpeg = pipeline.create(); auto xoutRgb = pipeline.create(); - auto videoEnc = pipeline.create(); xoutJpeg->setStreamName("jpeg"); xoutRgb->setStreamName("rgb"); diff --git a/examples/src/rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp index 904a662aa..e943e9535 100644 --- a/examples/src/rgb_mono_encoding.cpp +++ b/examples/src/rgb_mono_encoding.cpp @@ -16,6 +16,7 @@ int main(int argc, char** argv) { using namespace std::chrono; std::signal(SIGINT, &sigintHandler); + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/rgb_preview.cpp b/examples/src/rgb_preview.cpp index 650810148..8dbc7cdc7 100644 --- a/examples/src/rgb_preview.cpp +++ b/examples/src/rgb_preview.cpp @@ -7,6 +7,7 @@ int main() { using namespace std; + // Create 
pipeline dai::Pipeline pipeline; // Define source and output diff --git a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index 99a9e5d2c..108e5fb63 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -13,7 +13,7 @@ static std::atomic newConfig{false}; int main() { using namespace std; - // Start defining a pipeline + // Create pipeline dai::Pipeline pipeline; // Define sources and outputs diff --git a/examples/src/stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp index 1522c5eef..bfbad4c3c 100644 --- a/examples/src/stereo_depth_video.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -12,13 +12,16 @@ int main() { // TODO - split this example into two separate examples bool withDepth = true; + // Create pipeline dai::Pipeline pipeline; + // Define sources and outputs auto monoLeft = pipeline.create(); auto monoRight = pipeline.create(); + auto stereo = withDepth ? pipeline.create() : nullptr; + auto xoutLeft = pipeline.create(); auto xoutRight = pipeline.create(); - auto stereo = withDepth ? pipeline.create() : nullptr; auto xoutDisp = pipeline.create(); auto xoutDepth = pipeline.create(); auto xoutRectifL = pipeline.create(); @@ -64,7 +67,7 @@ int main() { stereo->setExtendedDisparity(extended); stereo->setSubpixel(subpixel); - // Link plugins CAM -> STEREO -> XLINK + // Linking monoLeft->out.link(stereo->left); monoRight->out.link(stereo->right); diff --git a/examples/src/system_information.cpp b/examples/src/system_information.cpp index 96f2b729a..72695b9c6 100644 --- a/examples/src/system_information.cpp +++ b/examples/src/system_information.cpp @@ -21,9 +21,10 @@ void printSystemInformation(dai::SystemInformation info) { int main() { using namespace std; - // Start defining a pipeline + // Create pipeline dai::Pipeline pipeline; + // Define source and output auto sysLog = pipeline.create(); auto xout = pipeline.create(); @@ -38,13 +39,6 @@ int main() { // Connect to device dai::Device device; - // Query device (before pipeline starts) - dai::MemoryInfo ddr = device.getDdrMemoryUsage(); - printf("Ddr used / total - %.2f / %.2f MiB\n", ddr.used / (1024.0f * 1024.0f), ddr.total / (1024.0f * 1024.0f)); - - dai::MemoryInfo cmx = device.getCmxMemoryUsage(); - printf("Cmx used / total - %.2f / %.2f MiB\n", cmx.used / (1024.0f * 1024.0f), cmx.total / (1024.0f * 1024.0f)); - // Connect to device and start pipeline device.startPipeline(pipeline); From 03a1773dcbb1e0aab71e1a2c19bc468cf2f5d0ca Mon Sep 17 00:00:00 2001 From: Erol444 Date: Tue, 11 May 2021 23:57:46 +0100 Subject: [PATCH 35/56] added a function in stereo node that returns the maxDisparity. 
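The example cleanups above settle on a shared section layout (Create pipeline, Define sources and outputs, Properties, Linking, Connect to device and start pipeline). A minimal sketch of that skeleton follows, loosely modeled on the rgb_preview example; the concrete node settings are illustrative rather than copied from any single patch.

#include "depthai/depthai.hpp"

int main() {
    // Create pipeline
    dai::Pipeline pipeline;

    // Define source and output
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto xoutRgb = pipeline.create<dai::node::XLinkOut>();
    xoutRgb->setStreamName("rgb");

    // Properties
    camRgb->setPreviewSize(300, 300);
    camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
    camRgb->setInterleaved(false);

    // Linking
    camRgb->preview.link(xoutRgb->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);
    auto qRgb = device.getOutputQueue("rgb", 4, false);

    while(true) {
        auto inRgb = qRgb->get<dai::ImgFrame>();
        cv::imshow("rgb", inRgb->getCvFrame());
        int key = cv::waitKey(1);
        if(key == 'q' || key == 'Q') {
            return 0;
        }
    }
    return 0;
}
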
Also changed stereo_example.cpp to use this new function --- examples/src/stereo_example.cpp | 6 +----- include/depthai/pipeline/node/StereoDepth.hpp | 6 ++++++ src/pipeline/node/StereoDepth.cpp | 7 +++++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/examples/src/stereo_example.cpp b/examples/src/stereo_example.cpp index 935698e69..10a4ed60b 100644 --- a/examples/src/stereo_example.cpp +++ b/examples/src/stereo_example.cpp @@ -50,10 +50,6 @@ int main() { bool extended = false; bool subpixel = true; - int maxDisp = 96; - if(extended) maxDisp *= 2; - if(subpixel) maxDisp *= 32; // 5 bits fractional disparity - if(withDepth) { // StereoDepth stereo->setConfidenceThreshold(200); @@ -107,7 +103,7 @@ int main() { // Note: in some configurations (if depth is enabled), disparity may output garbage data auto disparity = dispQueue->get(); cv::Mat disp(disparity->getHeight(), disparity->getWidth(), subpixel ? CV_16UC1 : CV_8UC1, disparity->getData().data()); - disp.convertTo(disp, CV_8UC1, 255.0 / maxDisp); // Extend disparity range + disp.convertTo(disp, CV_8UC1, 255 / stereo->getMaxDisparity()); // Extend disparity range cv::imshow("disparity", disp); cv::Mat disp_color; cv::applyColorMap(disp, disp_color, cv::COLORMAP_JET); diff --git a/include/depthai/pipeline/node/StereoDepth.hpp b/include/depthai/pipeline/node/StereoDepth.hpp index b45503d11..bc0237566 100644 --- a/include/depthai/pipeline/node/StereoDepth.hpp +++ b/include/depthai/pipeline/node/StereoDepth.hpp @@ -174,6 +174,12 @@ class StereoDepth : public Node { * DEPRECATED. The output is auto-enabled if used */ [[deprecated("Function call should be removed")]] void setOutputDepth(bool enable); + + /** + * Useful for normalization of the disparity map. + * @returns Number of disparity levels + */ + float getMaxDisparity(); }; } // namespace node diff --git a/src/pipeline/node/StereoDepth.cpp b/src/pipeline/node/StereoDepth.cpp index 57219d18a..eac7e45c5 100644 --- a/src/pipeline/node/StereoDepth.cpp +++ b/src/pipeline/node/StereoDepth.cpp @@ -103,5 +103,12 @@ void StereoDepth::setOutputDepth(bool enable) { spdlog::warn("{} is deprecated. The output is auto-enabled if used", __func__); } +float StereoDepth::getMaxDisparity(void) { + float maxDisp = 95.0; + if (properties.enableExtendedDisparity) maxDisp *= 2; + if (properties.enableLeftRightCheck) maxDisp *= 32; + return maxDisp; +} + } // namespace node } // namespace dai From 8ce04da9055421a9e2bca07707efabc0afa76b87 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Wed, 12 May 2021 22:43:38 +0100 Subject: [PATCH 36/56] fixes for the PR --- include/depthai/pipeline/node/StereoDepth.hpp | 4 ++-- src/pipeline/node/StereoDepth.cpp | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/depthai/pipeline/node/StereoDepth.hpp b/include/depthai/pipeline/node/StereoDepth.hpp index bc0237566..407226092 100644 --- a/include/depthai/pipeline/node/StereoDepth.hpp +++ b/include/depthai/pipeline/node/StereoDepth.hpp @@ -177,9 +177,9 @@ class StereoDepth : public Node { /** * Useful for normalization of the disparity map. 
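A short sketch of how this helper is used for visualization, mirroring the updated stereo example; it assumes stereo is an already-configured dai::node::StereoDepth and dispQueue is the host queue receiving its disparity frames.

// Normalize a disparity frame to 0..255 for display; 'stereo' and 'dispQueue'
// come from the surrounding stereo example.
auto disparity = dispQueue->get<dai::ImgFrame>();
cv::Mat disp = disparity->getFrame();
// getMaxDisparity() reports the largest value the node can output for its current
// configuration, so this scaling stays valid when extended disparity or subpixel
// mode is enabled.
disp.convertTo(disp, CV_8UC1, 255.0f / stereo->getMaxDisparity());
cv::Mat dispColor;
cv::applyColorMap(disp, dispColor, cv::COLORMAP_JET);
cv::imshow("disparity", dispColor);
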
- * @returns Number of disparity levels + * @returns Maximum disparity value that the node can return */ - float getMaxDisparity(); + float getMaxDisparity() const; }; } // namespace node diff --git a/src/pipeline/node/StereoDepth.cpp b/src/pipeline/node/StereoDepth.cpp index eac7e45c5..fc856026d 100644 --- a/src/pipeline/node/StereoDepth.cpp +++ b/src/pipeline/node/StereoDepth.cpp @@ -103,10 +103,10 @@ void StereoDepth::setOutputDepth(bool enable) { spdlog::warn("{} is deprecated. The output is auto-enabled if used", __func__); } -float StereoDepth::getMaxDisparity(void) { +float StereoDepth::getMaxDisparity(void) const { float maxDisp = 95.0; - if (properties.enableExtendedDisparity) maxDisp *= 2; - if (properties.enableLeftRightCheck) maxDisp *= 32; + if(properties.enableExtendedDisparity) maxDisp *= 2; + if(properties.enableLeftRightCheck) maxDisp *= 32; return maxDisp; } From a84d29330f883bc9fd96186f8c8a47271df11805 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Fri, 14 May 2021 11:12:08 +0100 Subject: [PATCH 37/56] removed void from the function's arguments --- src/pipeline/node/StereoDepth.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pipeline/node/StereoDepth.cpp b/src/pipeline/node/StereoDepth.cpp index fc856026d..944e46323 100644 --- a/src/pipeline/node/StereoDepth.cpp +++ b/src/pipeline/node/StereoDepth.cpp @@ -103,7 +103,7 @@ void StereoDepth::setOutputDepth(bool enable) { spdlog::warn("{} is deprecated. The output is auto-enabled if used", __func__); } -float StereoDepth::getMaxDisparity(void) const { +float StereoDepth::getMaxDisparity() const { float maxDisp = 95.0; if(properties.enableExtendedDisparity) maxDisp *= 2; if(properties.enableLeftRightCheck) maxDisp *= 32; From 54c38ea06f05447032250168b8a7e81a8cbf9ce5 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Fri, 14 May 2021 22:40:14 +0100 Subject: [PATCH 38/56] Fixed getMaxDisparity calculation --- src/pipeline/node/StereoDepth.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pipeline/node/StereoDepth.cpp b/src/pipeline/node/StereoDepth.cpp index 944e46323..f441be127 100644 --- a/src/pipeline/node/StereoDepth.cpp +++ b/src/pipeline/node/StereoDepth.cpp @@ -106,7 +106,7 @@ void StereoDepth::setOutputDepth(bool enable) { float StereoDepth::getMaxDisparity() const { float maxDisp = 95.0; if(properties.enableExtendedDisparity) maxDisp *= 2; - if(properties.enableLeftRightCheck) maxDisp *= 32; + if(properties.enableSubpixel) maxDisp *= 32; return maxDisp; } From c58a1c0364e53a2db2a4e3760ea95d64c834ccba Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 16 May 2021 02:50:36 +0200 Subject: [PATCH 39/56] Added getUsbSpeed --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- include/depthai/device/Device.hpp | 8 ++++++++ shared/depthai-shared | 2 +- src/device/Device.cpp | 6 ++++++ 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 88d2756dd..13d38e346 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "fddbe1b7868bdb67945b50ddc9dfaeb15bba2d60") +set(DEPTHAI_DEVICE_SIDE_COMMIT "793bf951382470ed75ed55f4531a7cc528728e8a") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/include/depthai/device/Device.hpp b/include/depthai/device/Device.hpp index 
9d575bc17..c6d52b157 100644 --- a/include/depthai/device/Device.hpp +++ b/include/depthai/device/Device.hpp @@ -20,6 +20,7 @@ #include "depthai-shared/common/ChipTemperature.hpp" #include "depthai-shared/common/CpuUsage.hpp" #include "depthai-shared/common/MemoryInfo.hpp" +#include "depthai-shared/common/UsbSpeed.hpp" #include "depthai-shared/log/LogLevel.hpp" #include "depthai-shared/log/LogMessage.hpp" @@ -451,6 +452,13 @@ class Device { */ CpuUsage getLeonMssCpuUsage(); + /** + * Retrieves USB connection speed + * + * @returns USB connection speed of connected device if applicable. Unknown otherwise. + */ + UsbSpeed getUsbSpeed(); + /** * Explicitly closes connection to device. * @note This function does not need to be explicitly called diff --git a/shared/depthai-shared b/shared/depthai-shared index cd7a857f4..f99b4b47e 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit cd7a857f4bc4a4d0e5348a6cc6a536c0a3abc7b7 +Subproject commit f99b4b47e0864dc8a533ff421531445942164649 diff --git a/src/device/Device.cpp b/src/device/Device.cpp index ed3c425c5..a614d95a7 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -838,6 +838,12 @@ CpuUsage Device::getLeonMssCpuUsage() { return client->call("getLeonMssCpuUsage").as(); } +UsbSpeed Device::getUsbSpeed() { + checkClosed(); + + return client->call("getUsbSpeed").as(); +} + bool Device::isPipelineRunning() { checkClosed(); From 0259335fd11ef5b1afd67eaf41a7fa269cc0f534 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 17 May 2021 03:07:47 +0200 Subject: [PATCH 40/56] Added printing helpers and UsbSpeed example --- examples/src/camera_preview_example.cpp | 5 +++- include/depthai/common/CameraBoardSocket.hpp | 25 ++++++++++++++++ include/depthai/common/UsbSpeed.hpp | 31 ++++++++++++++++++++ include/depthai/device/Device.hpp | 4 +-- include/depthai/pipeline/node/MonoCamera.hpp | 2 +- 5 files changed, 63 insertions(+), 4 deletions(-) create mode 100644 include/depthai/common/CameraBoardSocket.hpp create mode 100644 include/depthai/common/UsbSpeed.hpp diff --git a/examples/src/camera_preview_example.cpp b/examples/src/camera_preview_example.cpp index 82b7c99c7..95f6c179b 100644 --- a/examples/src/camera_preview_example.cpp +++ b/examples/src/camera_preview_example.cpp @@ -34,10 +34,13 @@ int main() { cout << "Connected cameras: "; for(const auto& cam : d.getConnectedCameras()) { - cout << static_cast(cam) << " "; + cout << cam << " "; } cout << endl; + // Print USB speed + cout << "Usb speed: " << d.getUsbSpeed() << endl; + // Start the pipeline d.startPipeline(p); diff --git a/include/depthai/common/CameraBoardSocket.hpp b/include/depthai/common/CameraBoardSocket.hpp new file mode 100644 index 000000000..acb02fc05 --- /dev/null +++ b/include/depthai/common/CameraBoardSocket.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include "depthai-shared/common/CameraBoardSocket.hpp" + +namespace dai { + +inline std::ostream& operator<<(std::ostream& out, const CameraBoardSocket& socket) { + switch(socket) { + case CameraBoardSocket::AUTO : + out << "AUTO"; + break; + case CameraBoardSocket::RGB : + out << "RGB"; + break; + case CameraBoardSocket::LEFT : + out << "LEFT"; + break; + case CameraBoardSocket::RIGHT : + out << "RIGHT"; + break; + } + return out; +} + +} // namespace dai \ No newline at end of file diff --git a/include/depthai/common/UsbSpeed.hpp b/include/depthai/common/UsbSpeed.hpp new file mode 100644 index 000000000..7b2669ccf --- /dev/null +++ b/include/depthai/common/UsbSpeed.hpp @@ -0,0 +1,31 @@ 
+#pragma once + +#include "depthai-shared/common/UsbSpeed.hpp" + +namespace dai { + +inline std::ostream& operator<<(std::ostream& out, const UsbSpeed& speed) { + switch(speed) { + case UsbSpeed::UNKNOWN : + out << "UNKNOWN"; + break; + case UsbSpeed::LOW : + out << "LOW"; + break; + case UsbSpeed::FULL : + out << "FULL"; + break; + case UsbSpeed::HIGH : + out << "HIGH"; + break; + case UsbSpeed::SUPER : + out << "SUPER"; + break; + case UsbSpeed::SUPER_PLUS : + out << "SUPER_PLUS"; + break; + } + return out; +} + +} // namespace dai diff --git a/include/depthai/device/Device.hpp b/include/depthai/device/Device.hpp index c6d52b157..3d4991f11 100644 --- a/include/depthai/device/Device.hpp +++ b/include/depthai/device/Device.hpp @@ -10,17 +10,17 @@ // project #include "CallbackHandler.hpp" #include "DataQueue.hpp" +#include "depthai/common/CameraBoardSocket.hpp" +#include "depthai/common/UsbSpeed.hpp" #include "depthai/pipeline/Pipeline.hpp" #include "depthai/utility/Pimpl.hpp" #include "depthai/xlink/XLinkConnection.hpp" #include "depthai/xlink/XLinkStream.hpp" // shared -#include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/ChipTemperature.hpp" #include "depthai-shared/common/CpuUsage.hpp" #include "depthai-shared/common/MemoryInfo.hpp" -#include "depthai-shared/common/UsbSpeed.hpp" #include "depthai-shared/log/LogLevel.hpp" #include "depthai-shared/log/LogMessage.hpp" diff --git a/include/depthai/pipeline/node/MonoCamera.hpp b/include/depthai/pipeline/node/MonoCamera.hpp index 0978e55bf..78a57f5bf 100644 --- a/include/depthai/pipeline/node/MonoCamera.hpp +++ b/include/depthai/pipeline/node/MonoCamera.hpp @@ -2,10 +2,10 @@ #include +#include "depthai/common/CameraBoardSocket.hpp" #include "depthai/pipeline/Node.hpp" // shared -#include #include namespace dai { From 72b7400f57a0dd50ced61a9f3b4e9ff212814ccf Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Mon, 17 May 2021 16:16:39 +0200 Subject: [PATCH 41/56] Fixed style --- include/depthai/common/CameraBoardSocket.hpp | 8 ++++---- include/depthai/common/UsbSpeed.hpp | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/depthai/common/CameraBoardSocket.hpp b/include/depthai/common/CameraBoardSocket.hpp index acb02fc05..b27bedd0b 100644 --- a/include/depthai/common/CameraBoardSocket.hpp +++ b/include/depthai/common/CameraBoardSocket.hpp @@ -6,16 +6,16 @@ namespace dai { inline std::ostream& operator<<(std::ostream& out, const CameraBoardSocket& socket) { switch(socket) { - case CameraBoardSocket::AUTO : + case CameraBoardSocket::AUTO: out << "AUTO"; break; - case CameraBoardSocket::RGB : + case CameraBoardSocket::RGB: out << "RGB"; break; - case CameraBoardSocket::LEFT : + case CameraBoardSocket::LEFT: out << "LEFT"; break; - case CameraBoardSocket::RIGHT : + case CameraBoardSocket::RIGHT: out << "RIGHT"; break; } diff --git a/include/depthai/common/UsbSpeed.hpp b/include/depthai/common/UsbSpeed.hpp index 7b2669ccf..59ccdcb5e 100644 --- a/include/depthai/common/UsbSpeed.hpp +++ b/include/depthai/common/UsbSpeed.hpp @@ -6,26 +6,26 @@ namespace dai { inline std::ostream& operator<<(std::ostream& out, const UsbSpeed& speed) { switch(speed) { - case UsbSpeed::UNKNOWN : + case UsbSpeed::UNKNOWN: out << "UNKNOWN"; break; - case UsbSpeed::LOW : + case UsbSpeed::LOW: out << "LOW"; break; - case UsbSpeed::FULL : + case UsbSpeed::FULL: out << "FULL"; break; - case UsbSpeed::HIGH : + case UsbSpeed::HIGH: out << "HIGH"; break; - case UsbSpeed::SUPER : + case UsbSpeed::SUPER: out 
<< "SUPER"; break; - case UsbSpeed::SUPER_PLUS : + case UsbSpeed::SUPER_PLUS: out << "SUPER_PLUS"; break; } return out; } -} // namespace dai +} // namespace dai From 2c27af5440aa988275e38786d96c154edf811fe8 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Mon, 17 May 2021 20:22:23 +0300 Subject: [PATCH 42/56] Remove unnecessary libraries, improving the code --- examples/src/depth_crop_control.cpp | 1 - examples/src/depth_preview.cpp | 3 --- examples/src/device_queue_event.cpp | 7 +------ examples/src/encoding_max_limit.cpp | 21 +++++++++---------- examples/src/image_manip_example.cpp | 1 - examples/src/image_manip_warp.cpp | 1 - examples/src/mjpeg_encoding_example.cpp | 7 +------ examples/src/mono_camera_control.cpp | 1 - examples/src/mono_depth_mobilenetssd.cpp | 3 --- examples/src/mono_full_resolution_saver.cpp | 9 ++------ examples/src/mono_mobilenet.cpp | 1 - examples/src/mono_preview.cpp | 2 -- examples/src/object_tracker.cpp | 1 - examples/src/opencv_support.cpp | 4 ---- examples/src/rgb_camera_control.cpp | 1 - examples/src/rgb_depth_aligned.cpp | 2 +- examples/src/rgb_encoding.cpp | 8 +++---- examples/src/rgb_encoding_mobilenet.cpp | 7 ++----- examples/src/rgb_encoding_mono_mobilenet.cpp | 7 ++----- .../src/rgb_encoding_mono_mobilenet_depth.cpp | 6 ++---- examples/src/rgb_full_resolution_saver.cpp | 5 ++--- examples/src/rgb_mobilenet.cpp | 4 +++- examples/src/rgb_mobilenet_4k.cpp | 9 ++++---- examples/src/rgb_mono_encoding.cpp | 14 ++++++------- examples/src/rgb_preview.cpp | 5 +---- examples/src/rgb_video.cpp | 3 --- examples/src/spatial_location_calculator.cpp | 1 - examples/src/spatial_mobilenet.cpp | 8 +++---- examples/src/spatial_mobilenet_mono.cpp | 8 +++---- examples/src/spatial_object_tracker.cpp | 4 +--- examples/src/spatial_tiny_yolo.cpp | 4 +--- examples/src/stereo_depth_video.cpp | 18 +++++++--------- examples/src/system_information.cpp | 3 --- .../src/tiny_yolo_v3_device_side_decoding.cpp | 3 --- .../src/tiny_yolo_v4_device_side_decoding.cpp | 3 --- examples/src/video_mobilenet.cpp | 1 - examples/src/webcam_mobilenet_example.cpp | 1 - 37 files changed, 56 insertions(+), 131 deletions(-) diff --git a/examples/src/depth_crop_control.cpp b/examples/src/depth_crop_control.cpp index bd0ef2f0f..4265bffbd 100644 --- a/examples/src/depth_crop_control.cpp +++ b/examples/src/depth_crop_control.cpp @@ -2,7 +2,6 @@ * This example shows usage of depth camera in crop mode with the possibility to move the crop. * Use 'WASD' in order to do it. 
*/ -#include #include // Includes common necessary includes for development using depthai library diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp index c78b2819b..99f55a173 100644 --- a/examples/src/depth_preview.cpp +++ b/examples/src/depth_preview.cpp @@ -1,4 +1,3 @@ -#include #include // Inludes common necessary includes for development using depthai library @@ -12,8 +11,6 @@ static std::atomic subpixel{false}; static std::atomic lr_check{false}; int main() { - using namespace std; - // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/device_queue_event.cpp b/examples/src/device_queue_event.cpp index 5bab6be0e..056cb54cb 100644 --- a/examples/src/device_queue_event.cpp +++ b/examples/src/device_queue_event.cpp @@ -1,14 +1,9 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -int main(int argc, char** argv) { - using namespace std; - +int main() { // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/encoding_max_limit.cpp b/examples/src/encoding_max_limit.cpp index 54794e36a..943d53f86 100644 --- a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -1,5 +1,4 @@ #include -#include #include // Inludes common necessary includes for development using depthai library @@ -11,7 +10,7 @@ static void sigintHandler(int signum) { alive = false; } -int main(int argc, char** argv) { +int main() { using namespace std; using namespace std::chrono; std::signal(SIGINT, &sigintHandler); @@ -19,7 +18,7 @@ int main(int argc, char** argv) { // Create pipeline dai::Pipeline pipeline; - // Define a source - color camera and 2 mono camera + // Define sources and outputs auto camRgb = pipeline.create(); auto monoCam = pipeline.create(); auto monoCam2 = pipeline.create(); @@ -64,10 +63,10 @@ int main(int argc, char** argv) { auto outQ3 = device.getOutputQueue("ve3Out", 30, true); // The .h264 / .h265 files are raw stream files (not playable yet) - auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); - auto videoFile2 = std::ofstream("color.h265", std::ios::binary); - auto videoFile3 = std::ofstream("mono2.h264", std::ios::binary); - std::cout << "Press Ctrl+C to stop encoding..." << std::endl; + auto videoFile1 = ofstream("mono1.h264", ios::binary); + auto videoFile2 = ofstream("color.h265", ios::binary); + auto videoFile3 = ofstream("mono2.h264", ios::binary); + cout << "Press Ctrl+C to stop encoding..." 
<< endl; while(alive) { auto out1 = outQ1->get(); @@ -78,10 +77,10 @@ int main(int argc, char** argv) { videoFile3.write((char*)out3->getData().data(), out3->getData().size()); } - std::cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 25 -i mono1.h264 -c copy mono1.mp4" << std::endl; - std::cout << "ffmpeg -framerate 25 -i mono2.h264 -c copy mono2.mp4" << std::endl; - std::cout << "ffmpeg -framerate 25 -i color.h265 -c copy color.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 25 -i mono1.h264 -c copy mono1.mp4" << endl; + cout << "ffmpeg -framerate 25 -i mono2.h264 -c copy mono2.mp4" << endl; + cout << "ffmpeg -framerate 25 -i color.h265 -c copy color.mp4" << endl; return 0; } diff --git a/examples/src/image_manip_example.cpp b/examples/src/image_manip_example.cpp index 610d1c092..c6278529a 100644 --- a/examples/src/image_manip_example.cpp +++ b/examples/src/image_manip_example.cpp @@ -1,4 +1,3 @@ -#include #include #include "utility.hpp" diff --git a/examples/src/image_manip_warp.cpp b/examples/src/image_manip_warp.cpp index ba91e8443..610cff4f8 100644 --- a/examples/src/image_manip_warp.cpp +++ b/examples/src/image_manip_warp.cpp @@ -1,4 +1,3 @@ -#include #include #include "depthai/depthai.hpp" diff --git a/examples/src/mjpeg_encoding_example.cpp b/examples/src/mjpeg_encoding_example.cpp index 8c7b018c6..8e1daacc1 100644 --- a/examples/src/mjpeg_encoding_example.cpp +++ b/examples/src/mjpeg_encoding_example.cpp @@ -1,13 +1,9 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" -int main(int argc, char** argv) { - using namespace std; +int main() { using namespace std::chrono; // Create pipeline @@ -69,6 +65,5 @@ int main(int argc, char** argv) { return 0; } } - return 0; } diff --git a/examples/src/mono_camera_control.cpp b/examples/src/mono_camera_control.cpp index 78a51216c..8636c0f43 100644 --- a/examples/src/mono_camera_control.cpp +++ b/examples/src/mono_camera_control.cpp @@ -7,7 +7,6 @@ * To go back to auto controls: * 'E' - autoexposure */ -#include #include // Includes common necessary includes for development using depthai library diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 2863cb2a4..902b813b5 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -1,8 +1,5 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp index e02ef7163..2cfac6eab 100644 --- a/examples/src/mono_full_resolution_saver.cpp +++ b/examples/src/mono_full_resolution_saver.cpp @@ -1,17 +1,12 @@ #include -#include #include -#include "errno.h" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" #include "utility.hpp" -int main(int argc, char** argv) { - using namespace std; +int main() { using namespace std::chrono; - // Create pipeline dai::Pipeline pipeline; @@ -42,7 +37,7 @@ int main(int argc, char** argv) { // Frame is transformed and ready to be shown cv::imshow("right", inRight->getCvFrame()); - 
uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + uint64_t time = duration_cast(system_clock::now().time_since_epoch()).count(); std::stringstream videoStr; videoStr << dirName << "/" << time << ".png"; // After showing the frame, it's being stored inside a target directory as a PNG image diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp index 78a2bd767..9ad882756 100644 --- a/examples/src/mono_mobilenet.cpp +++ b/examples/src/mono_mobilenet.cpp @@ -1,4 +1,3 @@ -#include #include #include "utility.hpp" diff --git a/examples/src/mono_preview.cpp b/examples/src/mono_preview.cpp index 4283def23..cd53e08fe 100644 --- a/examples/src/mono_preview.cpp +++ b/examples/src/mono_preview.cpp @@ -1,11 +1,9 @@ -#include #include // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" int main() { - using namespace std; // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/object_tracker.cpp b/examples/src/object_tracker.cpp index 20a777313..c260b6983 100644 --- a/examples/src/object_tracker.cpp +++ b/examples/src/object_tracker.cpp @@ -1,5 +1,4 @@ #include -#include #include // Inludes common necessary includes for development using depthai library diff --git a/examples/src/opencv_support.cpp b/examples/src/opencv_support.cpp index 53c7c338d..e55af8c38 100644 --- a/examples/src/opencv_support.cpp +++ b/examples/src/opencv_support.cpp @@ -1,4 +1,3 @@ -#include #include // Inludes common necessary includes for development using depthai library @@ -8,9 +7,6 @@ #include int main() { - using namespace std; - using namespace std::chrono; - // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/rgb_camera_control.cpp b/examples/src/rgb_camera_control.cpp index 84fa270df..74b3c6412 100644 --- a/examples/src/rgb_camera_control.cpp +++ b/examples/src/rgb_camera_control.cpp @@ -10,7 +10,6 @@ * 'E' - autoexposure * 'F' - autofocus (continuous) */ -#include #include #include "utility.hpp" diff --git a/examples/src/rgb_depth_aligned.cpp b/examples/src/rgb_depth_aligned.cpp index d743d8f54..bd381b4c1 100644 --- a/examples/src/rgb_depth_aligned.cpp +++ b/examples/src/rgb_depth_aligned.cpp @@ -8,7 +8,7 @@ // Optional. If set (true), the ColorCamera is downscaled from 1080p to 720p. // Otherwise (false), the aligned depth is automatically upscaled to 1080p -static constexpr bool downscaleColor = true; +static std::atomic downscaleColor{true}; int main() { using namespace std; diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp index 7400ac02e..b0f6b3c2a 100644 --- a/examples/src/rgb_encoding.cpp +++ b/examples/src/rgb_encoding.cpp @@ -1,5 +1,4 @@ #include -#include #include // Includes common necessary includes for development using depthai library @@ -12,6 +11,7 @@ static void sigintHandler(int signum) { } int main(int argc, char** argv) { + using namespace std; std::signal(SIGINT, &sigintHandler); // Create pipeline @@ -42,15 +42,15 @@ int main(int argc, char** argv) { // The .h265 file is a raw stream file (not playable yet) auto videoFile = std::ofstream("video.h265", std::ios::binary); - std::cout << "Press Ctrl+C to stop encoding..." << std::endl; + cout << "Press Ctrl+C to stop encoding..." 
<< endl; while(alive) { auto h265Packet = q->get(); videoFile.write((char*)(h265Packet->getData().data()), h265Packet->getData().size()); } - std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4) using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; return 0; } diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp index 40f155bf7..081bbd25b 100644 --- a/examples/src/rgb_encoding_mobilenet.cpp +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -1,8 +1,5 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -110,8 +107,8 @@ int main(int argc, char** argv) { int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << endl; return 0; } } diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp index 3c3a6dd2f..0fd43c6c6 100644 --- a/examples/src/rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -1,8 +1,5 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -139,8 +136,8 @@ int main(int argc, char** argv) { int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; return 0; } } diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index a74118d59..d6934a796 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -1,8 +1,6 @@ #include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -188,8 +186,8 @@ int main(int argc, char** argv) { int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - std::cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; return 0; } } diff --git a/examples/src/rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp index 
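The encoding examples above all share the same save loop. Below is a condensed, self-contained sketch of the pattern, using the 4K H.265 setup from rgb_encoding; the queue name and encoder preset follow that example.

#include <atomic>
#include <csignal>
#include <fstream>
#include <iostream>

#include "depthai/depthai.hpp"

static std::atomic<bool> alive{true};
static void sigintHandler(int signum) {
    alive = false;
}

int main() {
    std::signal(SIGINT, &sigintHandler);

    // Create pipeline
    dai::Pipeline pipeline;

    // Define sources and outputs
    auto camRgb = pipeline.create<dai::node::ColorCamera>();
    auto videoEnc = pipeline.create<dai::node::VideoEncoder>();
    auto xout = pipeline.create<dai::node::XLinkOut>();
    xout->setStreamName("h265");

    // Properties
    camRgb->setBoardSocket(dai::CameraBoardSocket::RGB);
    camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K);
    videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN);

    // Linking
    camRgb->video.link(videoEnc->input);
    videoEnc->bitstream.link(xout->input);

    // Connect to device and start pipeline
    dai::Device device(pipeline);
    auto q = device.getOutputQueue("h265", 30, true);

    // The .h265 file is a raw stream file (not playable yet)
    std::ofstream videoFile("video.h265", std::ios::binary);
    std::cout << "Press Ctrl+C to stop encoding..." << std::endl;
    while(alive) {
        auto h265Packet = q->get<dai::ImgFrame>();
        videoFile.write((char*)h265Packet->getData().data(), h265Packet->getData().size());
    }

    // Wrap the raw stream into a playable container afterwards, for example:
    //   ffmpeg -framerate 30 -i video.h265 -c copy video.mp4
    return 0;
}
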
18c4a1f86..785c4f964 100644 --- a/examples/src/rgb_full_resolution_saver.cpp +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -1,12 +1,11 @@ #include -#include #include // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" #include "utility.hpp" -int main(int argc, char** argv) { +int main() { using namespace std::chrono; // Create pipeline @@ -49,7 +48,7 @@ int main(int argc, char** argv) { auto encFrames = qJpeg->tryGetAll(); for(const auto& encFrame : encFrames) { - uint64_t time = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + uint64_t time = duration_cast(system_clock::now().time_since_epoch()).count(); std::stringstream videoStr; videoStr << dirName << "/" << time << ".jpeg"; auto videoFile = std::ofstream(videoStr.str(), std::ios::binary); diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp index 15e745301..31bbce199 100644 --- a/examples/src/rgb_mobilenet.cpp +++ b/examples/src/rgb_mobilenet.cpp @@ -119,7 +119,9 @@ int main(int argc, char** argv) { displayFrame("video", frame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') return 0; + if(key == 'q' || key == 'Q') { + return 0; + } } return 0; } diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index bd30bcd43..aa498bfe4 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -1,8 +1,5 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -92,7 +89,7 @@ int main(int argc, char** argv) { cv::namedWindow("video", cv::WINDOW_NORMAL); cv::resizeWindow("video", 1280, 720); - std::cout << "Resize video window with mouse drag!" << std::endl; + cout << "Resize video window with mouse drag!" << endl; while(true) { auto inVideo = qVideo->get(); @@ -106,7 +103,9 @@ int main(int argc, char** argv) { displayFrame("preview", previewFrame, detections); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') return 0; + if(key == 'q' || key == 'Q') { + return 0; + } } return 0; } diff --git a/examples/src/rgb_mono_encoding.cpp b/examples/src/rgb_mono_encoding.cpp index e943e9535..b99efe5a0 100644 --- a/examples/src/rgb_mono_encoding.cpp +++ b/examples/src/rgb_mono_encoding.cpp @@ -1,5 +1,4 @@ #include -#include #include // Inludes common necessary includes for development using depthai library @@ -11,9 +10,8 @@ static void sigintHandler(int signum) { alive = false; } -int main(int argc, char** argv) { +int main() { using namespace std; - using namespace std::chrono; std::signal(SIGINT, &sigintHandler); // Create pipeline @@ -64,7 +62,7 @@ int main(int argc, char** argv) { auto videoFile1 = std::ofstream("mono1.h264", std::ios::binary); auto videoFile2 = std::ofstream("color.h265", std::ios::binary); auto videoFile3 = std::ofstream("mono2.h264", std::ios::binary); - std::cout << "Press Ctrl+C to stop encoding..." << std::endl; + cout << "Press Ctrl+C to stop encoding..." 
<< endl; while(alive) { auto out1 = outQ1->get(); @@ -75,10 +73,10 @@ int main(int argc, char** argv) { videoFile3.write((char*)out3->getData().data(), out3->getData().size()); } - std::cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << std::endl; - std::cout << "ffmpeg -framerate 30 -i mono1.h264 -c copy mono1.mp4" << std::endl; - std::cout << "ffmpeg -framerate 30 -i mono2.h264 -c copy mono2.mp4" << std::endl; - std::cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << std::endl; + cout << "To view the encoded data, convert the stream file (.h264/.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i mono1.h264 -c copy mono1.mp4" << endl; + cout << "ffmpeg -framerate 30 -i mono2.h264 -c copy mono2.mp4" << endl; + cout << "ffmpeg -framerate 30 -i color.h265 -c copy color.mp4" << endl; return 0; } diff --git a/examples/src/rgb_preview.cpp b/examples/src/rgb_preview.cpp index 8dbc7cdc7..2cc958494 100644 --- a/examples/src/rgb_preview.cpp +++ b/examples/src/rgb_preview.cpp @@ -1,12 +1,9 @@ -#include #include // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" int main() { - using namespace std; - // Create pipeline dai::Pipeline pipeline; @@ -36,7 +33,7 @@ int main() { auto inRgb = qRgb->get(); // Retrieve 'bgr' (opencv format) frame - cv::imshow("bgr", inRgb->getCvFrame()); + cv::imshow("rgb", inRgb->getCvFrame()); int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/rgb_video.cpp b/examples/src/rgb_video.cpp index 00c67c2df..ecb349e4d 100644 --- a/examples/src/rgb_video.cpp +++ b/examples/src/rgb_video.cpp @@ -1,12 +1,9 @@ -#include #include // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" int main() { - using namespace std; - // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index 108e5fb63..c09c33dbc 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -1,4 +1,3 @@ -#include #include #include "utility.hpp" diff --git a/examples/src/spatial_mobilenet.cpp b/examples/src/spatial_mobilenet.cpp index 68dc36e5d..9a822d66e 100644 --- a/examples/src/spatial_mobilenet.cpp +++ b/examples/src/spatial_mobilenet.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -72,10 +69,11 @@ int main(int argc, char** argv) { monoRight->out.link(stereo->right); camRgb->preview.link(spatialDetectionNetwork->input); - if(syncNN) + if(syncNN) { spatialDetectionNetwork->passthrough.link(xoutRgb->input); - else + } else { camRgb->preview.link(xoutRgb->input); + } spatialDetectionNetwork->out.link(xoutNN->input); spatialDetectionNetwork->boundingBoxMapping.link(xoutBoundingBoxDepthMapping->input); diff --git a/examples/src/spatial_mobilenet_mono.cpp b/examples/src/spatial_mobilenet_mono.cpp index aea0809ff..9eac14972 100644 --- a/examples/src/spatial_mobilenet_mono.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" @@ -73,10 +70,11 @@ int main(int argc, char** argv) { 
monoRight->out.link(stereo->right); imageManip->out.link(spatialDetectionNetwork->input); - if(syncNN) + if(syncNN) { spatialDetectionNetwork->passthrough.link(xoutManip->input); - else + } else { imageManip->out.link(xoutManip->input); + } spatialDetectionNetwork->out.link(nnOut->input); spatialDetectionNetwork->boundingBoxMapping.link(depthRoiMap->input); diff --git a/examples/src/spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp index fbac703c2..e1faf9119 100644 --- a/examples/src/spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include "utility.hpp" @@ -11,7 +9,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static bool syncNN = true; +static std::atomic syncNN{true}; int main(int argc, char** argv) { using namespace std; diff --git a/examples/src/spatial_tiny_yolo.cpp b/examples/src/spatial_tiny_yolo.cpp index c1709eed1..2e4cd8717 100644 --- a/examples/src/spatial_tiny_yolo.cpp +++ b/examples/src/spatial_tiny_yolo.cpp @@ -1,6 +1,4 @@ #include -#include -#include #include "utility.hpp" @@ -18,7 +16,7 @@ static const std::vector labelMap = { "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; -static bool syncNN = true; +static std::atomic syncNN{true}; int main(int argc, char** argv) { using namespace std; diff --git a/examples/src/stereo_depth_video.cpp b/examples/src/stereo_depth_video.cpp index bfbad4c3c..c622d1f0f 100644 --- a/examples/src/stereo_depth_video.cpp +++ b/examples/src/stereo_depth_video.cpp @@ -1,16 +1,12 @@ -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" +static std::atomic withDepth{true}; + int main() { using namespace std; - using namespace std::chrono; - // TODO - split this example into two separate examples - bool withDepth = true; // Create pipeline dai::Pipeline pipeline; @@ -45,11 +41,11 @@ int main() { monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // monoRight->setFps(5.0); - bool outputDepth = false; - bool outputRectified = true; - bool lrcheck = true; - bool extended = false; - bool subpixel = false; + std::atomic outputDepth{false}; + std::atomic outputRectified{true}; + std::atomic lrcheck{true}; + std::atomic extended{false}; + std::atomic subpixel{false}; int maxDisp = 96; if(extended) maxDisp *= 2; diff --git a/examples/src/system_information.cpp b/examples/src/system_information.cpp index 72695b9c6..a84890fa6 100644 --- a/examples/src/system_information.cpp +++ b/examples/src/system_information.cpp @@ -1,4 +1,3 @@ -#include #include // Inludes common necessary includes for development using depthai library @@ -19,8 +18,6 @@ void printSystemInformation(dai::SystemInformation info) { } int main() { - using namespace std; - // Create pipeline dai::Pipeline pipeline; diff --git a/examples/src/tiny_yolo_v3_device_side_decoding.cpp b/examples/src/tiny_yolo_v3_device_side_decoding.cpp index e5adde17f..36a73c788 100644 --- a/examples/src/tiny_yolo_v3_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v3_device_side_decoding.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using 
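Several spatial examples above gate their display path on a syncNN flag. A short sketch of that linking choice, with node names borrowed from the spatial mobilenet example (camRgb, spatialDetectionNetwork and xoutRgb are assumed to already exist):

// When syncNN is true, the host displays the network's passthrough frame, i.e. the
// exact frame the detections were computed on, so boxes and image stay in sync.
// When false, the camera preview is linked out directly (lower latency, but the
// displayed frame may be newer than the detections drawn on it).
camRgb->preview.link(spatialDetectionNetwork->input);
if(syncNN) {
    spatialDetectionNetwork->passthrough.link(xoutRgb->input);
} else {
    camRgb->preview.link(xoutRgb->input);
}
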
depthai library #include "depthai/depthai.hpp" diff --git a/examples/src/tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp index 942fded16..3309492cb 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v4_device_side_decoding.cpp @@ -1,9 +1,6 @@ #include -#include #include -#include "utility.hpp" - // Inludes common necessary includes for development using depthai library #include "depthai/depthai.hpp" diff --git a/examples/src/video_mobilenet.cpp b/examples/src/video_mobilenet.cpp index 49083a557..a4ce91420 100644 --- a/examples/src/video_mobilenet.cpp +++ b/examples/src/video_mobilenet.cpp @@ -1,4 +1,3 @@ -#include #include #include "utility.hpp" diff --git a/examples/src/webcam_mobilenet_example.cpp b/examples/src/webcam_mobilenet_example.cpp index dcce8fcf4..a32dd4c1f 100644 --- a/examples/src/webcam_mobilenet_example.cpp +++ b/examples/src/webcam_mobilenet_example.cpp @@ -1,5 +1,4 @@ -#include #include #include "utility.hpp" From 85c9b2f43eb52716c5a78d920956caddf455c63d Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Mon, 17 May 2021 21:38:41 +0300 Subject: [PATCH 43/56] OV9282: fix over-exposure outdoors, in sunlight --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index bc7a1b150..84920637e 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "37840fc261256c6e8a1e7d854838d6055000f195") +set(DEPTHAI_DEVICE_SIDE_COMMIT "6fcedbe811789bb36181f06639b84a0bf01d2172") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") From 916f9cd598cc4fc4f2c0673e594d9eee50ab9b4e Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 17 May 2021 23:01:02 +0200 Subject: [PATCH 44/56] Hotfix - Lossless encoding --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- shared/depthai-shared | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index bc7a1b150..1108f06d6 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "37840fc261256c6e8a1e7d854838d6055000f195") +set(DEPTHAI_DEVICE_SIDE_COMMIT "2ca9f51f803f2ebb587ffb7fdcea01d6638bdf6a") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/shared/depthai-shared b/shared/depthai-shared index b8555bcff..a29bfbb7a 160000 --- a/shared/depthai-shared +++ b/shared/depthai-shared @@ -1 +1 @@ -Subproject commit b8555bcff359ba390929a74377f31b95e378cc34 +Subproject commit a29bfbb7a4152692b67bacac79be58904a0c26d2 From aa6cb88d0af9bc1f3158cb385f0fb0136e5c392b Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Tue, 18 May 2021 03:35:27 +0300 Subject: [PATCH 45/56] Sync cpp examples with latest python --- examples/src/depth_preview.cpp | 14 ++------------ examples/src/mono_depth_mobilenetssd.cpp | 5 +++-- examples/src/rgb_depth_aligned.cpp | 3 ++- examples/src/rgb_encoding_mono_mobilenet_depth.cpp | 8 ++++---- 4 files changed, 11 insertions(+), 19 deletions(-) diff --git a/examples/src/depth_preview.cpp b/examples/src/depth_preview.cpp 
index 99f55a173..12054b080 100644 --- a/examples/src/depth_preview.cpp +++ b/examples/src/depth_preview.cpp @@ -33,20 +33,9 @@ int main() { // Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) depth->setMedianFilter(dai::StereoDepthProperties::MedianFilter::KERNEL_7x7); depth->setLeftRightCheck(lr_check); - - // Normal disparity values range from 0..95, will be used for normalization - int max_disparity = 95; - - if(extended_disparity) max_disparity *= 2; // Double the range depth->setExtendedDisparity(extended_disparity); - - if(subpixel) max_disparity *= 32; // 5 fractional bits, x32 depth->setSubpixel(subpixel); - // When we get disparity to the host, we will multiply all values with the multiplier - // for better visualization - float multiplier = 255 / max_disparity; - // Linking monoLeft->out.link(depth->left); monoRight->out.link(depth->right); @@ -61,7 +50,8 @@ int main() { while(true) { auto inDepth = q->get(); auto frame = inDepth->getFrame(); - frame.convertTo(frame, CV_8UC1, multiplier); + // Normalization for better visualization + frame.convertTo(frame, CV_8UC1, 255 / depth->getMaxDisparity()); cv::imshow("disparity", frame); diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 902b813b5..2e03241f6 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -102,7 +102,8 @@ int main(int argc, char** argv) { cv::imshow(name, frame); }; - float disparity_multiplier = 255 / 95; + // Disparity range is used for normalization + float disparityMultiplier = 255 / stereo->getMaxDisparity(); while(true) { auto inRight = qRight->get(); @@ -121,7 +122,7 @@ int main(int argc, char** argv) { detection.xmax = 1 - swap; } } - disparityFrame.convertTo(disparityFrame, CV_8UC1, disparity_multiplier); + disparityFrame.convertTo(disparityFrame, CV_8UC1, disparityMultiplier); // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); show("disparity", disparityFrame, detections); diff --git a/examples/src/rgb_depth_aligned.cpp b/examples/src/rgb_depth_aligned.cpp index bd381b4c1..98c0bffcd 100644 --- a/examples/src/rgb_depth_aligned.cpp +++ b/examples/src/rgb_depth_aligned.cpp @@ -81,8 +81,9 @@ int main() { if(latestPacket.find(name) != latestPacket.end()) { if(name == "depth") { frame[name] = latestPacket[name]->getFrame(); + auto maxDisparity = stereo->getMaxDisparity(); // Optional, extend range 0..95 -> 0..255, for a better visualisation - if(1) frame[name].convertTo(frame[name], CV_8UC1, 255. / 95); + if(1) frame[name].convertTo(frame[name], CV_8UC1, 255. 
/ maxDisparity); // Optional, apply false colorization if(1) cv::applyColorMap(frame[name], frame[name], cv::COLORMAP_HOT); } else { diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index d6934a796..fe48d813a 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -84,6 +84,9 @@ int main(int argc, char** argv) { manip->out.link(manipOut->input); nn->out.link(nnOut->input); + // Disparity range is used for normalization + float disparityMultiplier = 255 / depth->getMaxDisparity(); + // Connect to device and start pipeline dai::Device device(pipeline); @@ -99,9 +102,6 @@ int main(int argc, char** argv) { cv::namedWindow("right", cv::WINDOW_NORMAL); cv::namedWindow("manip", cv::WINDOW_NORMAL); - // Disparity range is 0..95, used for normalization - float disparity_multiplier = 255 / 95; - while(true) { auto inRight = qRight->get(); auto inManip = qManip->get(); @@ -119,7 +119,7 @@ int main(int argc, char** argv) { if(flipRectified) { cv::flip(frameDisparity, frameDisparity, 1); } - frameDisparity.convertTo(frameDisparity, CV_8UC1, disparity_multiplier); + frameDisparity.convertTo(frameDisparity, CV_8UC1, disparityMultiplier); cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; From 74f7846466ce764c1e08285358fe5eb4a14f7fb4 Mon Sep 17 00:00:00 2001 From: CsabaGergely Date: Wed, 19 May 2021 01:17:11 +0300 Subject: [PATCH 46/56] Synch --- examples/CMakeLists.txt | 2 +- examples/src/encoding_max_limit.cpp | 14 +- examples/src/mono_depth_mobilenetssd.cpp | 58 ++++-- examples/src/mono_full_resolution_saver.cpp | 7 +- examples/src/mono_mobilenet.cpp | 32 +++- examples/src/mono_preview.cpp | 14 +- examples/src/object_tracker.cpp | 11 +- examples/src/rgb_camera_control.cpp | 91 ++++----- examples/src/rgb_encoding.cpp | 1 - examples/src/rgb_encoding_mobilenet.cpp | 29 ++- examples/src/rgb_encoding_mono_mobilenet.cpp | 109 ++++++----- .../src/rgb_encoding_mono_mobilenet_depth.cpp | 179 ++++++++++-------- examples/src/rgb_full_resolution_saver.cpp | 4 +- examples/src/rgb_mobilenet.cpp | 42 ++-- examples/src/rgb_mobilenet_4k.cpp | 39 +++- ...age_manip_warp.cpp => rgb_rotate_warp.cpp} | 0 examples/src/spatial_location_calculator.cpp | 6 +- examples/src/spatial_mobilenet.cpp | 36 ++-- examples/src/spatial_mobilenet_mono.cpp | 11 +- examples/src/spatial_object_tracker.cpp | 37 ++-- examples/src/spatial_tiny_yolo.cpp | 11 +- examples/src/system_information.cpp | 5 +- .../src/tiny_yolo_v3_device_side_decoding.cpp | 42 ++-- .../src/tiny_yolo_v4_device_side_decoding.cpp | 46 +++-- 24 files changed, 499 insertions(+), 327 deletions(-) rename examples/src/{image_manip_warp.cpp => rgb_rotate_warp.cpp} (100%) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 2a184ae7c..d20fb20d1 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -165,7 +165,7 @@ target_compile_definitions(rgb_encoding_mobilenet PRIVATE BLOB_PATH="${mobilenet dai_add_example(image_manip src/image_manip_example.cpp) # Imagae manip node exapmle with warping -dai_add_example(image_manip_warp src/image_manip_warp.cpp) +dai_add_example(rgb_rotate_warp src/rgb_rotate_warp.cpp) # Device side decoding example for tiny-yolo-v3 dai_add_example(tiny_yolo_v3_device_side_decoding src/tiny_yolo_v3_device_side_decoding.cpp) diff --git a/examples/src/encoding_max_limit.cpp 
b/examples/src/encoding_max_limit.cpp index 943d53f86..8d29a5392 100644 --- a/examples/src/encoding_max_limit.cpp +++ b/examples/src/encoding_max_limit.cpp @@ -20,8 +20,8 @@ int main() { // Define sources and outputs auto camRgb = pipeline.create(); - auto monoCam = pipeline.create(); - auto monoCam2 = pipeline.create(); + auto monoLeft = pipeline.create(); + auto monoRight = pipeline.create(); auto ve1 = pipeline.create(); auto ve2 = pipeline.create(); auto ve3 = pipeline.create(); @@ -37,18 +37,18 @@ int main() { // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); - monoCam->setBoardSocket(dai::CameraBoardSocket::LEFT); - monoCam2->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); // Setting to 26fps will trigger error ve1->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); ve2->setDefaultProfilePreset(3840, 2160, 25, dai::VideoEncoderProperties::Profile::H265_MAIN); ve3->setDefaultProfilePreset(1280, 720, 25, dai::VideoEncoderProperties::Profile::H264_MAIN); - // Create outputs - monoCam->out.link(ve1->input); + // Linking + monoLeft->out.link(ve1->input); camRgb->video.link(ve2->input); - monoCam2->out.link(ve3->input); + monoRight->out.link(ve3->input); ve1->bitstream.link(ve1Out->input); ve2->bitstream.link(ve2Out->input); diff --git a/examples/src/mono_depth_mobilenetssd.cpp b/examples/src/mono_depth_mobilenetssd.cpp index 2e03241f6..cfa5b5812 100644 --- a/examples/src/mono_depth_mobilenetssd.cpp +++ b/examples/src/mono_depth_mobilenetssd.cpp @@ -54,6 +54,7 @@ int main(int argc, char** argv) { manip->initialConfig.setResize(300, 300); // The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + // Define a neural network that will make predictions based on the source frames nn->setConfidenceThreshold(0.5); nn->setBlobPath(nnPath); @@ -77,6 +78,10 @@ int main(int argc, char** argv) { auto qDisparity = device.getOutputQueue("disparity", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); + cv::Mat rightFrame; + cv::Mat disparityFrame; + std::vector detections; + // Add bounding boxes and text to the frame and show it to the user auto show = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 192, 203); @@ -102,31 +107,44 @@ int main(int argc, char** argv) { cv::imshow(name, frame); }; - // Disparity range is used for normalization float disparityMultiplier = 255 / stereo->getMaxDisparity(); while(true) { - auto inRight = qRight->get(); - auto inDet = qDet->get(); - auto inDisparity = qDisparity->get(); - auto detections = inDet->detections; - cv::Mat rightFrame = inRight->getCvFrame(); - cv::Mat disparityFrame = inDisparity->getCvFrame(); - - if(flipRectified) { - cv::flip(rightFrame, rightFrame, 1); - - for(auto& detection : detections) { - auto swap = detection.xmin; - detection.xmin = 1 - detection.xmax; - detection.xmax = 1 - swap; + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inRight = qRight->tryGet(); + auto inDet = qDet->tryGet(); + auto inDisparity = qDisparity->tryGet(); + + if(inRight) { + rightFrame = inRight->getCvFrame(); + if(flipRectified) { + cv::flip(rightFrame, rightFrame, 1); + } + } + + if(inDet) { + detections = inDet->detections; + if(flipRectified) { + for(auto& detection : detections) { + auto swap = detection.xmin; + detection.xmin = 1 - detection.xmax; + detection.xmax = 1 - swap; + } } } - disparityFrame.convertTo(disparityFrame, CV_8UC1, disparityMultiplier); - // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html - cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); - show("disparity", disparityFrame, detections); - show("rectified right", rightFrame, detections); + + if(inDisparity) { + // Frame is transformed, normalized, and color map will be applied to highlight the depth info + disparityFrame = inDisparity->getFrame(); + disparityFrame.convertTo(disparityFrame, CV_8UC1, disparityMultiplier); + // Available color maps: https://docs.opencv.org/3.4/d3/d50/group__imgproc__colormap.html + cv::applyColorMap(disparityFrame, disparityFrame, cv::COLORMAP_JET); + show("disparity", disparityFrame, detections); + } + + if(!rightFrame.empty()) { + show("rectified right", rightFrame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') return 0; diff --git a/examples/src/mono_full_resolution_saver.cpp b/examples/src/mono_full_resolution_saver.cpp index 2cfac6eab..a3e98346a 100644 --- a/examples/src/mono_full_resolution_saver.cpp +++ b/examples/src/mono_full_resolution_saver.cpp @@ -26,7 +26,7 @@ int main() { // Connect to device and start pipeline dai::Device device(pipeline); - // Queue + // Output queue will be used to get the grayscale frames from the output defined above auto qRight = device.getOutputQueue("right", 4, false); std::string dirName = "mono_data"; @@ -34,6 +34,7 @@ int main() { while(true) { auto inRight = qRight->get(); + // Data is originally represented as a flat 1D array, it needs to be converted into HxW 
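The synced examples above switch from blocking get() calls to tryGet(), which returns either a queued message or nullptr. A condensed sketch of that polling loop, with queue names and the displayFrame helper borrowed from the mono mobilenet example and assumed to be defined as shown there:

cv::Mat frame;
std::vector<dai::ImgDetection> detections;

while(true) {
    // Non-blocking: returns a queued message if one is available, or nullptr otherwise
    auto inRight = qRight->tryGet<dai::ImgFrame>();
    auto inDet = qDet->tryGet<dai::ImgDetections>();

    if(inRight) {
        frame = inRight->getCvFrame();
    }
    if(inDet) {
        detections = inDet->detections;
    }

    // Draw only once at least one frame has arrived; detections may lag the frame slightly
    if(!frame.empty()) {
        displayFrame("right", frame, detections);
    }

    int key = cv::waitKey(1);
    if(key == 'q' || key == 'Q') {
        return 0;
    }
}
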
form // Frame is transformed and ready to be shown cv::imshow("right", inRight->getCvFrame()); @@ -44,7 +45,9 @@ int main() { cv::imwrite(videoStr.str(), inRight->getCvFrame()); int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') return 0; + if(key == 'q' || key == 'Q') { + return 0; + } } return 0; } diff --git a/examples/src/mono_mobilenet.cpp b/examples/src/mono_mobilenet.cpp index 9ad882756..bb16d5603 100644 --- a/examples/src/mono_mobilenet.cpp +++ b/examples/src/mono_mobilenet.cpp @@ -40,15 +40,17 @@ int main(int argc, char** argv) { // Properties monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); - nn->setConfidenceThreshold(0.5); - nn->setBlobPath(nnPath); - nn->setNumInferenceThreads(2); - nn->input.setBlocking(false); + // Convert the grayscale frame into the nn-acceptable form manip->initialConfig.setResize(300, 300); // The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case) manip->initialConfig.setFrameType(dai::ImgFrame::Type::BGR888p); + nn->setConfidenceThreshold(0.5); + nn->setBlobPath(nnPath); + nn->setNumInferenceThreads(2); + nn->input.setBlocking(false); + // Linking monoRight->out.link(manip->inputImage); manip->out.link(nn->input); @@ -62,6 +64,9 @@ int main(int argc, char** argv) { auto qRight = device.getOutputQueue("right", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); + cv::Mat frame; + std::vector detections; + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); @@ -88,12 +93,21 @@ int main(int argc, char** argv) { }; while(true) { - auto inRight = qRight->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat frame = inRight->getCvFrame(); + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inRight = qRight->tryGet(); + auto inDet = qDet->tryGet(); + + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } - displayFrame("right", frame, detections); + if(!frame.empty()) { + displayFrame("right", frame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') return 0; diff --git a/examples/src/mono_preview.cpp b/examples/src/mono_preview.cpp index cd53e08fe..44558145e 100644 --- a/examples/src/mono_preview.cpp +++ b/examples/src/mono_preview.cpp @@ -34,11 +34,17 @@ int main() { auto qRight = device.getOutputQueue("right", 4, false); while(true) { - auto inLeft = qLeft->get(); - auto inRight = qRight->get(); + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inLeft = qLeft->tryGet(); + auto inRight = qRight->tryGet(); - cv::imshow("left", inLeft->getCvFrame()); - cv::imshow("right", inRight->getCvFrame()); + if(inLeft) { + cv::imshow("left", inLeft->getCvFrame()); + } + + if(inRight) { + cv::imshow("right", inRight->getCvFrame()); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/object_tracker.cpp b/examples/src/object_tracker.cpp index c260b6983..dc43559c2 100644 --- a/examples/src/object_tracker.cpp +++ b/examples/src/object_tracker.cpp @@ -8,7 +8,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", 
"person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static std::atomic syncNN{true}; +static std::atomic fullFrameTracking{false}; int main(int argc, char** argv) { using namespace std; @@ -57,13 +57,14 @@ int main(int argc, char** argv) { // Linking camRgb->preview.link(detectionNetwork->input); - if(syncNN) { - objectTracker->passthroughTrackerFrame.link(xlinkOut->input); + objectTracker->passthroughTrackerFrame.link(xlinkOut->input); + + if(fullFrameTracking) { + camRgb->video.link(objectTracker->inputTrackerFrame); } else { - camRgb->preview.link(xlinkOut->input); + detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); } - detectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); detectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); detectionNetwork->out.link(objectTracker->inputDetections); objectTracker->out.link(trackerOut->input); diff --git a/examples/src/rgb_camera_control.cpp b/examples/src/rgb_camera_control.cpp index 74b3c6412..791b4b900 100644 --- a/examples/src/rgb_camera_control.cpp +++ b/examples/src/rgb_camera_control.cpp @@ -68,33 +68,34 @@ int main() { // Connect to device and start pipeline dai::Device device(pipeline); - // Create data queues + // Get data queues auto controlQueue = device.getInputQueue("control"); auto configQueue = device.getInputQueue("config"); auto previewQueue = device.getOutputQueue("preview"); auto videoQueue = device.getOutputQueue("video"); auto stillQueue = device.getOutputQueue("still"); - // Max crop_x & crop_y - float max_crop_x = (camRgb->getResolutionWidth() - camRgb->getVideoWidth()) / (float)camRgb->getResolutionWidth(); - float max_crop_y = (camRgb->getResolutionHeight() - camRgb->getVideoHeight()) / (float)camRgb->getResolutionHeight(); + // Max cropX & cropY + float maxCropX = (camRgb->getResolutionWidth() - camRgb->getVideoWidth()) / (float)camRgb->getResolutionWidth(); + float maxCropY = (camRgb->getResolutionHeight() - camRgb->getVideoHeight()) / (float)camRgb->getResolutionHeight(); // Default crop - float crop_x = 0; - float crop_y = 0; + float cropX = 0; + float cropY = 0; + bool sendCamConfig = true; // Defaults and limits for manual focus/exposure controls - int lens_pos = 150; - int lens_min = 0; - int lens_max = 255; + int lensPos = 150; + int lensMin = 0; + int lensMax = 255; - int exp_time = 20000; - int exp_min = 1; - int exp_max = 33000; + int expTime = 20000; + int expMin = 1; + int expMax = 33000; - int sens_iso = 800; - int sens_min = 100; - int sens_max = 1600; + int sensIso = 800; + int sensMin = 100; + int sensMax = 1600; while(true) { auto previewFrames = previewQueue->tryGetAll(); @@ -109,6 +110,15 @@ int main() { auto frame = cv::imdecode(videoFrame->getData(), cv::IMREAD_UNCHANGED); // Display cv::imshow("video", frame); + + // Send new cfg to camera + if(sendCamConfig) { + dai::ImageManipConfig cfg; + cfg.setCropRect(cropX, cropY, 0, 0); + configQueue->send(cfg); + printf("Sending new crop - x: %f, y: %f\n", cropX, cropY); + sendCamConfig = false; + } } auto stillFrames = stillQueue->tryGetAll(); @@ -122,7 +132,7 @@ int main() { // Update screen (1ms pooling rate) int key = cv::waitKey(1); if(key == 'q') { - return 0; + break; } else if(key == 'c') { dai::CameraControl ctrl; ctrl.setCaptureStill(true); @@ -144,44 +154,39 @@ int main() { ctrl.setAutoExposureEnable(); controlQueue->send(ctrl); } else if(key == ',' || key == '.') { - if(key == ',') lens_pos -= LENS_STEP; - if(key == '.') lens_pos += LENS_STEP; - lens_pos = clamp(lens_pos, lens_min, 
lens_max); - printf("Setting manual focus, lens position: %d\n", lens_pos); + if(key == ',') lensPos -= LENS_STEP; + if(key == '.') lensPos += LENS_STEP; + lensPos = clamp(lensPos, lensMin, lensMax); + printf("Setting manual focus, lens position: %d\n", lensPos); dai::CameraControl ctrl; - ctrl.setManualFocus(lens_pos); + ctrl.setManualFocus(lensPos); controlQueue->send(ctrl); } else if(key == 'i' || key == 'o' || key == 'k' || key == 'l') { - if(key == 'i') exp_time -= EXP_STEP; - if(key == 'o') exp_time += EXP_STEP; - if(key == 'k') sens_iso -= ISO_STEP; - if(key == 'l') sens_iso += ISO_STEP; - exp_time = clamp(exp_time, exp_min, exp_max); - sens_iso = clamp(sens_iso, sens_min, sens_max); - printf("Setting manual exposure, time: %d, iso: %d\n", exp_time, sens_iso); + if(key == 'i') expTime -= EXP_STEP; + if(key == 'o') expTime += EXP_STEP; + if(key == 'k') sensIso -= ISO_STEP; + if(key == 'l') sensIso += ISO_STEP; + expTime = clamp(expTime, expMin, expMax); + sensIso = clamp(sensIso, sensMin, sensMax); + printf("Setting manual exposure, time: %d, iso: %d\n", expTime, sensIso); dai::CameraControl ctrl; - ctrl.setManualExposure(exp_time, sens_iso); + ctrl.setManualExposure(expTime, sensIso); controlQueue->send(ctrl); } else if(key == 'w' || key == 'a' || key == 's' || key == 'd') { if(key == 'a') { - crop_x -= (max_crop_x / camRgb->getResolutionWidth()) * STEP_SIZE; - if(crop_x < 0) crop_x = max_crop_x; + cropX -= (maxCropX / camRgb->getResolutionWidth()) * STEP_SIZE; + if(cropX < 0) cropX = maxCropX; } else if(key == 'd') { - crop_x += (max_crop_x / camRgb->getResolutionWidth()) * STEP_SIZE; - if(crop_x > max_crop_x) crop_x = 0.0f; + cropX += (maxCropX / camRgb->getResolutionWidth()) * STEP_SIZE; + if(cropX > maxCropX) cropX = 0.0f; } else if(key == 'w') { - crop_y -= (max_crop_y / camRgb->getResolutionHeight()) * STEP_SIZE; - if(crop_y < 0) crop_y = max_crop_y; + cropY -= (maxCropY / camRgb->getResolutionHeight()) * STEP_SIZE; + if(cropY < 0) cropY = maxCropY; } else if(key == 's') { - crop_y += (max_crop_y / camRgb->getResolutionHeight()) * STEP_SIZE; - if(crop_y > max_crop_y) crop_y = 0.0f; + cropY += (maxCropY / camRgb->getResolutionHeight()) * STEP_SIZE; + if(cropY > maxCropY) cropY = 0.0f; } - - // Send new cfg to camera - dai::ImageManipConfig cfg; - cfg.setCropRect(crop_x, crop_y, 0, 0); - configQueue->send(cfg); - printf("Sending new crop - x: %f, y: %f\n", crop_x, crop_y); + sendCamConfig = true; } } return 0; diff --git a/examples/src/rgb_encoding.cpp b/examples/src/rgb_encoding.cpp index b0f6b3c2a..52a192003 100644 --- a/examples/src/rgb_encoding.cpp +++ b/examples/src/rgb_encoding.cpp @@ -27,7 +27,6 @@ int main(int argc, char** argv) { // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_4_K); - videoEnc->setDefaultProfilePreset(3840, 2160, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); // Linking diff --git a/examples/src/rgb_encoding_mobilenet.cpp b/examples/src/rgb_encoding_mobilenet.cpp index 081bbd25b..991e13189 100644 --- a/examples/src/rgb_encoding_mobilenet.cpp +++ b/examples/src/rgb_encoding_mobilenet.cpp @@ -67,6 +67,9 @@ int main(int argc, char** argv) { auto qDet = device.getOutputQueue("nn", queueSize); auto qRgbEnc = device.getOutputQueue("h265", 30, true); + cv::Mat frame; + std::vector detections; + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = 
cv::Scalar(255, 0, 0); @@ -95,22 +98,30 @@ int main(int argc, char** argv) { auto videoFile = std::ofstream("video.h264", std::ios::binary); while(true) { - auto inRgb = qRgb->get(); - auto inDet = qDet->get(); - auto out = qRgbEnc->get(); - auto detections = inDet->detections; - cv::Mat frame = inRgb->getCvFrame(); + auto inRgb = qRgb->tryGet(); + auto inDet = qDet->tryGet(); + auto out = qRgbEnc->get(); videoFile.write((char*)out->getData().data(), out->getData().size()); - displayFrame("rgb", frame, detections); + if(inRgb) { + frame = inRgb->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; - cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << endl; - return 0; + break; } } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h264 -c copy video.mp4" << endl; return 0; } diff --git a/examples/src/rgb_encoding_mono_mobilenet.cpp b/examples/src/rgb_encoding_mono_mobilenet.cpp index 0fd43c6c6..249459ec9 100644 --- a/examples/src/rgb_encoding_mono_mobilenet.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet.cpp @@ -77,69 +77,86 @@ int main(int argc, char** argv) { auto qDet = device.getOutputQueue("nn", queueSize); auto qRgbEnc = device.getOutputQueue("h265", 30, true); + cv::Mat frame; + cv::Mat frameManip; + std::vector detections; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + auto videoFile = std::ofstream("video.h265", std::ios::binary); cv::namedWindow("right", cv::WINDOW_NORMAL); cv::namedWindow("manip", cv::WINDOW_NORMAL); while(true) { - auto inRight = qRight->get(); - auto inManip = qManip->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat frame = inRight->getCvFrame(); - cv::Mat frameManip = inManip->getCvFrame(); + auto inRight = qRight->tryGet(); + auto inManip = qManip->tryGet(); + auto inDet = qDet->tryGet(); auto out1 = qRgbEnc->get(); videoFile.write((char*)out1->getData().data(), out1->getData().size()); - int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; - auto color = cv::Scalar(255, 0, 0); - for(auto& detection : detections) { - int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * monoRight->getResolutionHeight(); - int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * monoRight->getResolutionHeight(); - - int labelIndex = detection.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inManip) { + frameManip = inManip->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); 
+ if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + // Show the frame + cv::imshow("right", frame); } - // Show the frame - cv::imshow("right", frame); - - for(auto& detection : detections) { - int x1 = detection.xmin * frameManip.cols; - int y1 = detection.ymin * frameManip.rows; - int x2 = detection.xmax * frameManip.cols; - int y2 = detection.ymax * frameManip.rows; - - int labelIndex = detection.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; + + if(!frameManip.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + // Show the frame + cv::imshow("manip", frameManip); } - // Show the frame - cv::imshow("manip", frameManip); int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; - cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; - return 0; + break; } } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; return 0; } diff --git a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp index fe48d813a..139fa6d00 100644 --- a/examples/src/rgb_encoding_mono_mobilenet_depth.cpp +++ b/examples/src/rgb_encoding_mono_mobilenet_depth.cpp @@ -31,11 
+31,11 @@ int main(int argc, char** argv) { // Define sources and outputs auto camRgb = pipeline.create(); auto videoEncoder = pipeline.create(); + auto monoRight = pipeline.create(); auto monoLeft = pipeline.create(); - auto camRight = pipeline.create(); auto depth = pipeline.create(); - auto nn = pipeline.create(); auto manip = pipeline.create(); + auto nn = pipeline.create(); auto videoOut = pipeline.create(); auto xoutRight = pipeline.create(); @@ -52,11 +52,11 @@ int main(int argc, char** argv) { // Properties camRgb->setBoardSocket(dai::CameraBoardSocket::RGB); camRgb->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); - videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); - camRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); - camRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + monoRight->setBoardSocket(dai::CameraBoardSocket::RIGHT); + monoRight->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); monoLeft->setBoardSocket(dai::CameraBoardSocket::LEFT); monoLeft->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P); + videoEncoder->setDefaultProfilePreset(1920, 1080, 30, dai::VideoEncoderProperties::Profile::H265_MAIN); // Note: the rectified streams are horizontally mirrored by default depth->setConfidenceThreshold(255); @@ -75,8 +75,8 @@ int main(int argc, char** argv) { // Linking camRgb->video.link(videoEncoder->input); videoEncoder->bitstream.link(videoOut->input); - camRight->out.link(xoutRight->input); - camRight->out.link(depth->right); + monoRight->out.link(xoutRight->input); + monoRight->out.link(depth->right); monoLeft->out.link(depth->left); depth->disparity.link(disparityOut->input); depth->rectifiedRight.link(manip->inputImage); @@ -98,98 +98,119 @@ int main(int argc, char** argv) { auto qDet = device.getOutputQueue("nn", queueSize); auto qRgbEnc = device.getOutputQueue("h265", 30, true); + cv::Mat frame; + cv::Mat frameManip; + cv::Mat frameDisparity; + std::vector detections; + int offsetX = (monoRight->getResolutionWidth() - monoRight->getResolutionHeight()) / 2; + auto color = cv::Scalar(255, 0, 0); + auto videoFile = std::ofstream("video.h265", std::ios::binary); cv::namedWindow("right", cv::WINDOW_NORMAL); cv::namedWindow("manip", cv::WINDOW_NORMAL); while(true) { - auto inRight = qRight->get(); - auto inManip = qManip->get(); - auto inDet = qDet->get(); - auto inDisparity = qDisparity->get(); - auto detections = inDet->detections; - - cv::Mat frame = inRight->getCvFrame(); - cv::Mat frameManip = inManip->getCvFrame(); - cv::Mat frameDisparity = inDisparity->getCvFrame(); + auto inRight = qRight->tryGet(); + auto inManip = qManip->tryGet(); + auto inDet = qDet->tryGet(); + auto inDisparity = qDisparity->tryGet(); auto out1 = qRgbEnc->get(); videoFile.write((char*)out1->getData().data(), out1->getData().size()); - if(flipRectified) { - cv::flip(frameDisparity, frameDisparity, 1); + if(inRight) { + frame = inRight->getCvFrame(); + } + + if(inManip) { + frameManip = inManip->getCvFrame(); } - frameDisparity.convertTo(frameDisparity, CV_8UC1, disparityMultiplier); - cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); - - int offsetX = (camRight->getResolutionWidth() - camRight->getResolutionHeight()) / 2; - auto color = cv::Scalar(255, 0, 0); - for(auto& detection : detections) { - int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * 
camRight->getResolutionHeight(); - int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * camRight->getResolutionHeight(); - - int labelIndex = detection.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; + + if(inDisparity) { + frameDisparity = inDisparity->getCvFrame(); + if(flipRectified) { + cv::flip(frameDisparity, frameDisparity, 1); + } + frameDisparity.convertTo(frameDisparity, CV_8UC1, disparityMultiplier); + cv::applyColorMap(frameDisparity, frameDisparity, cv::COLORMAP_JET); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!frame.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + // Show the right cam frame + cv::imshow("right", frame); } - // Show the right cam frame - cv::imshow("right", frame); - - for(auto& detection : detections) { - int x1 = detection.xmin * camRight->getResolutionHeight() + offsetX; - int y1 = detection.ymin * camRight->getResolutionHeight(); - int x2 = detection.xmax * camRight->getResolutionHeight() + offsetX; - int y2 = detection.ymax * camRight->getResolutionHeight(); - - int labelIndex = detection.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; + + if(!frameDisparity.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * monoRight->getResolutionHeight() + offsetX; + int y1 = detection.ymin * monoRight->getResolutionHeight(); + int x2 = detection.xmax * monoRight->getResolutionHeight() + offsetX; + int y2 = detection.ymax * monoRight->getResolutionHeight(); + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameDisparity, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameDisparity, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameDisparity, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - cv::putText(frameDisparity, 
labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frameDisparity, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frameDisparity, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + // Show the disparity frame + cv::imshow("disparity", frameDisparity); } - // Show the disparity frame - cv::imshow("disparity", frameDisparity); - - for(auto& detection : detections) { - int x1 = detection.xmin * frameManip.cols; - int y1 = detection.ymin * frameManip.rows; - int x2 = detection.xmax * frameManip.cols; - int y2 = detection.ymax * frameManip.rows; - - int labelIndex = detection.label; - std::string labelStr = to_string(labelIndex); - if(labelIndex < labelMap.size()) { - labelStr = labelMap[labelIndex]; + + if(!frameManip.empty()) { + for(auto& detection : detections) { + int x1 = detection.xmin * frameManip.cols; + int y1 = detection.ymin * frameManip.rows; + int x2 = detection.xmax * frameManip.cols; + int y2 = detection.ymax * frameManip.rows; + + int labelIndex = detection.label; + std::string labelStr = to_string(labelIndex); + if(labelIndex < labelMap.size()) { + labelStr = labelMap[labelIndex]; + } + cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + std::stringstream confStr; + confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; + cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } - cv::putText(frameManip, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - std::stringstream confStr; - confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frameManip, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); - cv::rectangle(frameManip, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); + // Show the manip frame + cv::imshow("manip", frameManip); } - // Show the manip frame - cv::imshow("manip", frameManip); int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { - cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; - cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; - return 0; + break; } } + cout << "To view the encoded data, convert the stream file (.h265) into a video file (.mp4), using a command below:" << endl; + cout << "ffmpeg -framerate 30 -i video.h265 -c copy video.mp4" << endl; return 0; } diff --git a/examples/src/rgb_full_resolution_saver.cpp b/examples/src/rgb_full_resolution_saver.cpp index 785c4f964..69b6eaf8f 100644 --- a/examples/src/rgb_full_resolution_saver.cpp +++ b/examples/src/rgb_full_resolution_saver.cpp @@ -56,7 +56,9 @@ int main() { } int key = cv::waitKey(1); - if(key == 'q' || key == 'Q') return 0; + if(key == 'q' || key == 'Q') { + return 0; + } } return 0; } diff --git a/examples/src/rgb_mobilenet.cpp b/examples/src/rgb_mobilenet.cpp index 31bbce199..4186a9446 100644 --- a/examples/src/rgb_mobilenet.cpp +++ b/examples/src/rgb_mobilenet.cpp @@ -68,6 +68,13 @@ int main(int argc, char** argv) { auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = 
device.getOutputQueue("nn", 4, false); + cv::Mat frame; + std::vector detections; + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + auto color2 = cv::Scalar(255, 255, 255); + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); @@ -93,15 +100,17 @@ int main(int argc, char** argv) { cv::imshow(name, frame); }; - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(true) { - auto inRgb = qRgb->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat frame = inRgb->getCvFrame(); + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = qDet->tryGet(); + } counter++; auto currentTime = steady_clock::now(); @@ -112,11 +121,20 @@ int main(int argc, char** argv) { startTime = currentTime; } - std::stringstream fpsStr; - fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } + + if(inDet) { + detections = inDet->detections; + } - displayFrame("video", frame, detections); + if(!frame.empty()) { + displayFrame("video", frame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/rgb_mobilenet_4k.cpp b/examples/src/rgb_mobilenet_4k.cpp index aa498bfe4..449b79332 100644 --- a/examples/src/rgb_mobilenet_4k.cpp +++ b/examples/src/rgb_mobilenet_4k.cpp @@ -57,11 +57,15 @@ int main(int argc, char** argv) { // Connect to device and start pipeline dai::Device device(pipeline); - // Queues + // Output queues will be used to get the frames and nn data from the outputs defined above auto qVideo = device.getOutputQueue("video", 4, false); auto qPreview = device.getOutputQueue("preview", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); + cv::Mat previewFrame; + cv::Mat videoFrame; + std::vector detections; + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); @@ -92,15 +96,30 @@ int main(int argc, char** argv) { cout << "Resize video window with mouse drag!" 
<< endl; while(true) { - auto inVideo = qVideo->get(); - auto inPreview = qPreview->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat videoFrame = inVideo->getCvFrame(); - cv::Mat previewFrame = inPreview->getCvFrame(); - - displayFrame("video", videoFrame, detections); - displayFrame("preview", previewFrame, detections); + // Instead of get (blocking), we use tryGet (nonblocking) which will return the available data or None otherwise + auto inVideo = qVideo->tryGet(); + auto inPreview = qPreview->tryGet(); + auto inDet = qDet->tryGet(); + + if(inVideo) { + videoFrame = inVideo->getCvFrame(); + } + + if(inPreview) { + previewFrame = inPreview->getCvFrame(); + } + + if(inDet) { + detections = inDet->detections; + } + + if(!videoFrame.empty()) { + displayFrame("video", videoFrame, detections); + } + + if(!previewFrame.empty()) { + displayFrame("preview", previewFrame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/image_manip_warp.cpp b/examples/src/rgb_rotate_warp.cpp similarity index 100% rename from examples/src/image_manip_warp.cpp rename to examples/src/rgb_rotate_warp.cpp diff --git a/examples/src/spatial_location_calculator.cpp b/examples/src/spatial_location_calculator.cpp index c09c33dbc..05137bfef 100644 --- a/examples/src/spatial_location_calculator.cpp +++ b/examples/src/spatial_location_calculator.cpp @@ -67,12 +67,13 @@ int main() { // Connect to device and start pipeline dai::Device device(pipeline); + // Output queue will be used to get the depth frames from the outputs defined above auto depthQueue = device.getOutputQueue("depth", 8, false); auto spatialCalcQueue = device.getOutputQueue("spatialData", 8, false); auto spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig"); - cv::Mat depthFrame; auto color = cv::Scalar(255, 255, 255); + std::cout << "Use WASD keys to move ROI!" 
<< std::endl; while(true) { @@ -104,13 +105,12 @@ int main() { depthZ << "Z: " << (int)depthData.spatialCoordinates.z << " mm"; cv::putText(depthFrameColor, depthZ.str(), cv::Point(xmin + 10, ymin + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); } - + // Show the frame cv::imshow("depth", depthFrameColor); int key = cv::waitKey(1); switch(key) { case 'q': - return 0; break; case 'w': if(topLeft.y - stepSize >= 0) { diff --git a/examples/src/spatial_mobilenet.cpp b/examples/src/spatial_mobilenet.cpp index 9a822d66e..ba72f1ad2 100644 --- a/examples/src/spatial_mobilenet.cpp +++ b/examples/src/spatial_mobilenet.cpp @@ -99,14 +99,24 @@ int main(int argc, char** argv) { auto inDet = detectionNNQueue->get(); auto depth = depthQueue->get(); - auto detections = inDet->detections; + counter++; + auto currentTime = steady_clock::now(); + auto elapsed = duration_cast>(currentTime - startTime); + if(elapsed > seconds(1)) { + fps = counter / elapsed.count(); + counter = 0; + startTime = currentTime; + } + cv::Mat frame = inPreview->getCvFrame(); cv::Mat depthFrame = depth->getFrame(); + cv::Mat depthFrameColor; cv::normalize(depthFrame, depthFrameColor, 255, 0, cv::NORM_INF, CV_8UC1); cv::equalizeHist(depthFrameColor, depthFrameColor); cv::applyColorMap(depthFrameColor, depthFrameColor, cv::COLORMAP_HOT); + auto detections = inDet->detections; if(!detections.empty()) { auto boundingBoxMapping = xoutBoundingBoxDepthMappingQueue->get(); auto roiDatas = boundingBoxMapping->getConfigData(); @@ -124,16 +134,6 @@ int main(int argc, char** argv) { cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } - counter++; - auto currentTime = steady_clock::now(); - auto elapsed = duration_cast>(currentTime - startTime); - if(elapsed > seconds(1)) { - fps = counter / elapsed.count(); - counter = 0; - startTime = currentTime; - } - - cv::Mat frame = inPreview->getCvFrame(); for(const auto& detection : detections) { int x1 = detection.xmin * frame.cols; @@ -146,30 +146,30 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); 
cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; cv::putText(frame, fpsStr.str(), cv::Point(2, inPreview->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("depth", depthFrameColor); - cv::imshow("rgb", frame); + cv::imshow("preview", frame); int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/spatial_mobilenet_mono.cpp b/examples/src/spatial_mobilenet_mono.cpp index 9eac14972..960d14207 100644 --- a/examples/src/spatial_mobilenet_mono.cpp +++ b/examples/src/spatial_mobilenet_mono.cpp @@ -134,7 +134,6 @@ int main(int argc, char** argv) { auto ymin = (int)topLeft.y; auto xmax = (int)bottomRight.x; auto ymax = (int)bottomRight.y; - cv::rectangle(depthFrameColor, cv::Rect(cv::Point(xmin, ymin), cv::Point(xmax, ymax)), color, cv::FONT_HERSHEY_SIMPLEX); } } @@ -155,20 +154,20 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(rectifiedRight, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(rectifiedRight, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; - cv::putText(rectifiedRight, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; - cv::putText(rectifiedRight, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; - cv::putText(rectifiedRight, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(rectifiedRight, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(rectifiedRight, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } diff --git a/examples/src/spatial_object_tracker.cpp b/examples/src/spatial_object_tracker.cpp index e1faf9119..b54ceca87 100644 --- a/examples/src/spatial_object_tracker.cpp +++ b/examples/src/spatial_object_tracker.cpp @@ -9,7 +9,7 @@ static const std::vector labelMap = {"background", "aeroplane", "bi "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"}; -static std::atomic syncNN{true}; +static std::atomic fullFrameTracking{false}; int main(int argc, char** argv) { using namespace std; @@ -63,7 +63,9 @@ int main(int argc, char** argv) { spatialDetectionNetwork->setDepthUpperThreshold(5000); objectTracker->setDetectionLabelsToTrack({15}); // track only person + // possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, 
ZERO_TERM_IMAGELESS objectTracker->setTrackerType(dai::TrackerType::ZERO_TERM_COLOR_HISTOGRAM); + // take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID objectTracker->setTrackerIdAssigmentPolicy(dai::TrackerIdAssigmentPolicy::SMALLEST_ID); // Linking @@ -71,18 +73,21 @@ int main(int argc, char** argv) { monoRight->out.link(stereo->right); camRgb->preview.link(spatialDetectionNetwork->input); - if(syncNN) { - objectTracker->passthroughTrackerFrame.link(xoutRgb->input); + objectTracker->passthroughTrackerFrame.link(xoutRgb->input); + objectTracker->out.link(trackerOut->input); + + if(fullFrameTracking) { + camRgb->setPreviewKeepAspectRatio(false); + camRgb->video.link(objectTracker->inputTrackerFrame); + objectTracker->inputTrackerFrame.setBlocking(false); + // do not block the pipeline if it's too slow on full frame + objectTracker->inputTrackerFrame.setQueueSize(2); } else { - camRgb->preview.link(xoutRgb->input); + spatialDetectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); } - objectTracker->out.link(trackerOut->input); - - spatialDetectionNetwork->passthrough.link(objectTracker->inputTrackerFrame); spatialDetectionNetwork->passthrough.link(objectTracker->inputDetectionFrame); spatialDetectionNetwork->out.link(objectTracker->inputDetections); - stereo->depth.link(spatialDetectionNetwork->inputDepth); // Connect to device and start pipeline @@ -94,7 +99,7 @@ int main(int argc, char** argv) { auto startTime = steady_clock::now(); int counter = 0; float fps = 0; - auto color = cv::Scalar(255, 0, 0); + auto color = cv::Scalar(255, 255, 255); while(true) { auto imgFrame = preview->get(); @@ -123,30 +128,30 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream idStr; idStr << "ID: " << t.id; - cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, idStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream statusStr; statusStr << "Status: " << t.status; - cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, statusStr.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; depthX << "X: " << (int)t.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; depthY << "Y: " << (int)t.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; depthZ << "Z: " << (int)t.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 95), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 95), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } std::stringstream fpsStr; - fpsStr << std::fixed << 
std::setprecision(2) << fps; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; cv::putText(frame, fpsStr.str(), cv::Point(2, imgFrame->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color); cv::imshow("tracker", frame); diff --git a/examples/src/spatial_tiny_yolo.cpp b/examples/src/spatial_tiny_yolo.cpp index 2e4cd8717..5371c5442 100644 --- a/examples/src/spatial_tiny_yolo.cpp +++ b/examples/src/spatial_tiny_yolo.cpp @@ -99,6 +99,7 @@ int main(int argc, char** argv) { // Connect to device and start pipeline dai::Device device(pipeline); + // Output queues will be used to get the rgb frames and nn data from the outputs defined above auto previewQueue = device.getOutputQueue("rgb", 4, false); auto detectionNNQueue = device.getOutputQueue("detections", 4, false); auto xoutBoundingBoxDepthMappingQueue = device.getOutputQueue("boundingBoxDepthMapping", 4, false); @@ -160,20 +161,20 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 35), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthX; depthX << "X: " << (int)detection.spatialCoordinates.x << " mm"; - cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthX.str(), cv::Point(x1 + 10, y1 + 50), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthY; depthY << "Y: " << (int)detection.spatialCoordinates.y << " mm"; - cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthY.str(), cv::Point(x1 + 10, y1 + 65), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream depthZ; depthZ << "Z: " << (int)detection.spatialCoordinates.z << " mm"; - cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, depthZ.str(), cv::Point(x1 + 10, y1 + 80), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } diff --git a/examples/src/system_information.cpp b/examples/src/system_information.cpp index a84890fa6..be4a11713 100644 --- a/examples/src/system_information.cpp +++ b/examples/src/system_information.cpp @@ -33,11 +33,8 @@ int main() { // Linking sysLog->out.link(xout->input); - // Connect to device - dai::Device device; - // Connect to device and start pipeline - device.startPipeline(pipeline); + dai::Device device(pipeline); // Output queue will be used to get the system info auto qSysInfo = device.getOutputQueue("sysinfo", 4, false); diff --git a/examples/src/tiny_yolo_v3_device_side_decoding.cpp b/examples/src/tiny_yolo_v3_device_side_decoding.cpp index 36a73c788..2968fcacf 100644 --- a/examples/src/tiny_yolo_v3_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v3_device_side_decoding.cpp @@ -76,6 +76,13 @@ int main(int argc, char** argv) { auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = device.getOutputQueue("nn", 4, false); + cv::Mat frame; + std::vector detections; + auto startTime = 
steady_clock::now(); + int counter = 0; + float fps = 0; + auto color2 = cv::Scalar(255, 255, 255); + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); @@ -101,15 +108,17 @@ int main(int argc, char** argv) { cv::imshow(name, frame); }; - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(true) { - auto inRgb = qRgb->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat frame = inRgb->getCvFrame(); + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = qDet->tryGet(); + } counter++; auto currentTime = steady_clock::now(); @@ -120,11 +129,20 @@ int main(int argc, char** argv) { startTime = currentTime; } - std::stringstream fpsStr; - fpsStr << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } + + if(inDet) { + detections = inDet->detections; + } - displayFrame("video", frame, detections); + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { diff --git a/examples/src/tiny_yolo_v4_device_side_decoding.cpp b/examples/src/tiny_yolo_v4_device_side_decoding.cpp index 3309492cb..0221f1c7d 100644 --- a/examples/src/tiny_yolo_v4_device_side_decoding.cpp +++ b/examples/src/tiny_yolo_v4_device_side_decoding.cpp @@ -82,6 +82,13 @@ int main(int argc, char** argv) { auto qRgb = device.getOutputQueue("rgb", 4, false); auto qDet = device.getOutputQueue("detections", 4, false); + cv::Mat frame; + std::vector detections; + auto startTime = steady_clock::now(); + int counter = 0; + float fps = 0; + auto color2 = cv::Scalar(255, 255, 255); + // Add bounding boxes and text to the frame and show it to the user auto displayFrame = [](std::string name, cv::Mat frame, std::vector& detections) { auto color = cv::Scalar(255, 0, 0); @@ -97,25 +104,27 @@ int main(int argc, char** argv) { if(labelIndex < labelMap.size()) { labelStr = labelMap[labelIndex]; } - cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, labelStr, cv::Point(x1 + 10, y1 + 20), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); std::stringstream confStr; confStr << std::fixed << std::setprecision(2) << detection.confidence * 100; - cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, color); + cv::putText(frame, confStr.str(), cv::Point(x1 + 10, y1 + 40), cv::FONT_HERSHEY_TRIPLEX, 0.5, 255); cv::rectangle(frame, cv::Rect(cv::Point(x1, y1), cv::Point(x2, y2)), color, cv::FONT_HERSHEY_SIMPLEX); } // Show the frame cv::imshow(name, frame); }; - auto startTime = steady_clock::now(); - int counter = 0; - float fps = 0; - while(true) { - auto inRgb = qRgb->get(); - auto inDet = qDet->get(); - auto detections = inDet->detections; - cv::Mat frame = inRgb->getCvFrame(); + std::shared_ptr inRgb; + std::shared_ptr inDet; + + if(syncNN) { + inRgb = qRgb->get(); + inDet = qDet->get(); + } else { + inRgb = qRgb->tryGet(); + inDet = 
qDet->tryGet(); + } counter++; auto currentTime = steady_clock::now(); @@ -126,11 +135,20 @@ int main(int argc, char** argv) { startTime = currentTime; } - std::stringstream fpsStr; - fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; - cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, 255, 0, 0); + if(inRgb) { + frame = inRgb->getCvFrame(); + std::stringstream fpsStr; + fpsStr << "NN fps: " << std::fixed << std::setprecision(2) << fps; + cv::putText(frame, fpsStr.str(), cv::Point(2, inRgb->getHeight() - 4), cv::FONT_HERSHEY_TRIPLEX, 0.4, color2); + } + + if(inDet) { + detections = inDet->detections; + } - displayFrame("video", frame, detections); + if(!frame.empty()) { + displayFrame("rgb", frame, detections); + } int key = cv::waitKey(1); if(key == 'q' || key == 'Q') { From 8378d6c3e4616bff35415e70fa547f50a1a3e571 Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Wed, 19 May 2021 10:58:54 +0300 Subject: [PATCH 47/56] Update FW: stereo fixes: LR-check flip, `depth` align to RGB --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 1108f06d6..b2a82408d 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "2ca9f51f803f2ebb587ffb7fdcea01d6638bdf6a") +set(DEPTHAI_DEVICE_SIDE_COMMIT "d26fdab6b0a66b09f0b3b6ca8d0dea50229278b7") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") From 3b9235744a5038c1398b97f65eaed1c248fdd9fc Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Wed, 19 May 2021 11:01:52 +0300 Subject: [PATCH 48/56] ImgFrame CV conversion: more verbose about size mismatch --- src/opencv/ImgFrame.cpp | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) mode change 100755 => 100644 src/opencv/ImgFrame.cpp diff --git a/src/opencv/ImgFrame.cpp b/src/opencv/ImgFrame.cpp old mode 100755 new mode 100644 index 658f8aa30..7f3920f93 --- a/src/opencv/ImgFrame.cpp +++ b/src/opencv/ImgFrame.cpp @@ -2,6 +2,8 @@ #include +#include "spdlog/spdlog.h" + namespace dai { void ImgFrame::setFrame(cv::Mat frame) { @@ -64,8 +66,12 @@ cv::Mat ImgFrame::getFrame(bool deepCopy) { // Check if enough data long requiredSize = CV_ELEM_SIZE(type) * size.area(); - if(static_cast(img.data.size()) < requiredSize) { - throw std::runtime_error("ImgFrame doesn't have enough data to encode specified frame. Maybe metadataOnly transfer was made?"); + long actualSize = static_cast(img.data.size()); + if(actualSize < requiredSize) { + throw std::runtime_error("ImgFrame doesn't have enough data to encode specified frame, required " + std::to_string(requiredSize) + ", actual " + + std::to_string(actualSize) + ". 
Maybe metadataOnly transfer was made?"); + } else if(actualSize > requiredSize) { + spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); } if(getWidth() <= 0 || getHeight() <= 0) { throw std::runtime_error("ImgFrame metadata not valid (width or height = 0)"); From ed7698d7146a5b633be09df6ee9420e27c5c0239 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Fri, 21 May 2021 06:19:58 +0300 Subject: [PATCH 49/56] Update FW with fix for random crashes (kernel crash on RPI/jetson) --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- src/device/Device.cpp | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index 1108f06d6..e10023edb 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "2ca9f51f803f2ebb587ffb7fdcea01d6638bdf6a") +set(DEPTHAI_DEVICE_SIDE_COMMIT "cbed8ccd32c87d1e3a0cb66cbe1c342f9a8c8950") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") diff --git a/src/device/Device.cpp b/src/device/Device.cpp index a614d95a7..2d6f5c834 100644 --- a/src/device/Device.cpp +++ b/src/device/Device.cpp @@ -468,8 +468,7 @@ void Device::init2(bool embeddedMvcmd, bool usb2Mode, const std::string& pathToM client = std::unique_ptr>( new nanorpc::core::client([this](nanorpc::core::type::buffer request) { - // TODO(TheMarpe) - causes issues on Windows - // std::unique_lock lock(this->rpcMutex); + std::unique_lock lock(this->rpcMutex); // Log the request data if(spdlog::get_level() == spdlog::level::trace) { From 63727bcbd3c40ef30abefd7b825d21cb60ed4396 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Fri, 21 May 2021 21:08:05 +0300 Subject: [PATCH 50/56] Update FW --- cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake index e10023edb..d4864c079 100644 --- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake +++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake @@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot") # "full commit hash of device side binary" -set(DEPTHAI_DEVICE_SIDE_COMMIT "cbed8ccd32c87d1e3a0cb66cbe1c342f9a8c8950") +set(DEPTHAI_DEVICE_SIDE_COMMIT "4e2882999984a6262facff1c12b94c1a2c115682") # "version if applicable" set(DEPTHAI_DEVICE_SIDE_VERSION "") From e14fd3990c8191066b87718aea40335b8e4de48d Mon Sep 17 00:00:00 2001 From: alex-luxonis Date: Fri, 21 May 2021 21:11:41 +0300 Subject: [PATCH 51/56] MonoCamera: add `raw` output. 
Update FW: OV9282 min autoexposure 20us
---
 cmake/Depthai/DepthaiDeviceSideConfig.cmake  | 2 +-
 include/depthai/pipeline/node/MonoCamera.hpp | 9 ++++++++-
 src/pipeline/node/MonoCamera.cpp             | 2 +-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
index 84920637e..a17cd5911 100644
--- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake
+++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
@@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")
 
 # "full commit hash of device side binary"
-set(DEPTHAI_DEVICE_SIDE_COMMIT "6fcedbe811789bb36181f06639b84a0bf01d2172")
+set(DEPTHAI_DEVICE_SIDE_COMMIT "48273e2911a1f511ed3a8426944e1ff0e24e253e")
 
 # "version if applicable"
 set(DEPTHAI_DEVICE_SIDE_VERSION "")
diff --git a/include/depthai/pipeline/node/MonoCamera.hpp b/include/depthai/pipeline/node/MonoCamera.hpp
index 78a57f5bf..fe3b81314 100644
--- a/include/depthai/pipeline/node/MonoCamera.hpp
+++ b/include/depthai/pipeline/node/MonoCamera.hpp
@@ -46,10 +46,17 @@ class MonoCamera : public Node {
     /**
      * Outputs ImgFrame message that carries RAW8 encoded (grayscale) frame data.
      *
-     * Suitable for use StereoDepth node
+     * Suitable for use StereoDepth node. Processed by ISP
      */
     Output out{*this, "out", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}};
 
+    /**
+     * Outputs ImgFrame message that carries RAW10-packed (MIPI CSI-2 format) frame data.
+     *
+     * Captured directly from the camera sensor
+     */
+    Output raw{*this, "raw", Output::Type::MSender, {{DatatypeEnum::ImgFrame, false}}};
+
     /**
      * Specify which board socket to use
      * @param boardSocket Board socket to use
diff --git a/src/pipeline/node/MonoCamera.cpp b/src/pipeline/node/MonoCamera.cpp
index 9191e117c..32ee64483 100644
--- a/src/pipeline/node/MonoCamera.cpp
+++ b/src/pipeline/node/MonoCamera.cpp
@@ -17,7 +17,7 @@ std::string MonoCamera::getName() const {
 }
 
 std::vector<Node::Output> MonoCamera::getOutputs() {
-    return {out};
+    return {out, raw};
 }
 
 std::vector<Node::Input> MonoCamera::getInputs() {

From 11d2a4d6bfc27eeecc04c64fd747223e7c224efd Mon Sep 17 00:00:00 2001
From: SzabolcsGergely
Date: Sat, 22 May 2021 04:21:56 +0300
Subject: [PATCH 52/56] Update FW

---
 cmake/Depthai/DepthaiDeviceSideConfig.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
index d4864c079..9c4215ee3 100644
--- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake
+++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
@@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")
 
 # "full commit hash of device side binary"
-set(DEPTHAI_DEVICE_SIDE_COMMIT "4e2882999984a6262facff1c12b94c1a2c115682")
+set(DEPTHAI_DEVICE_SIDE_COMMIT "cd8f39616c6cd09899c29faa2191e645b60d7235")
 
 # "version if applicable"
 set(DEPTHAI_DEVICE_SIDE_VERSION "")

From f45b13bd3959f1b54c8e8c8da672f334a7ccbb93 Mon Sep 17 00:00:00 2001
From: SzabolcsGergely
Date: Sun, 23 May 2021 00:06:07 +0300
Subject: [PATCH 53/56] Revert RPC mutex lock; it's reported that it has
 issues on Windows

---
 src/device/Device.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/device/Device.cpp b/src/device/Device.cpp
index 2d6f5c834..a614d95a7 100644
--- a/src/device/Device.cpp
+++ b/src/device/Device.cpp
@@ -468,7 +468,8 @@ void Device::init2(bool embeddedMvcmd, bool usb2Mode, const std::string& pathToM
     client = std::unique_ptr<nanorpc::core::client<nanorpc::packer::nlohmann_msgpack>>(
         new nanorpc::core::client<nanorpc::packer::nlohmann_msgpack>([this](nanorpc::core::type::buffer request) {
-            std::unique_lock<std::mutex> lock(this->rpcMutex);
+            // TODO(TheMarpe) - causes issues on Windows
+            // std::unique_lock<std::mutex> lock(this->rpcMutex);
 
             // Log the request data
             if(spdlog::get_level() == spdlog::level::trace) {

From 1f3bed069159a966018ee2430c0b2ff519cbf27e Mon Sep 17 00:00:00 2001
From: alex-luxonis
Date: Mon, 24 May 2021 17:03:37 +0300
Subject: [PATCH 54/56] Update FW, update `setRectifyMirrorFrame`
 functionality/description

---
 cmake/Depthai/DepthaiDeviceSideConfig.cmake   | 2 +-
 include/depthai/pipeline/node/StereoDepth.hpp | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/cmake/Depthai/DepthaiDeviceSideConfig.cmake b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
index b2a82408d..5526bc556 100644
--- a/cmake/Depthai/DepthaiDeviceSideConfig.cmake
+++ b/cmake/Depthai/DepthaiDeviceSideConfig.cmake
@@ -2,7 +2,7 @@ set(DEPTHAI_DEVICE_SIDE_MATURITY "snapshot")
 
 # "full commit hash of device side binary"
-set(DEPTHAI_DEVICE_SIDE_COMMIT "d26fdab6b0a66b09f0b3b6ca8d0dea50229278b7")
+set(DEPTHAI_DEVICE_SIDE_COMMIT "556a856e85570e0d874d19b305a0080a8f1f58bf")
 
 # "version if applicable"
 set(DEPTHAI_DEVICE_SIDE_VERSION "")
diff --git a/include/depthai/pipeline/node/StereoDepth.hpp b/include/depthai/pipeline/node/StereoDepth.hpp
index 407226092..926139131 100644
--- a/include/depthai/pipeline/node/StereoDepth.hpp
+++ b/include/depthai/pipeline/node/StereoDepth.hpp
@@ -155,8 +155,9 @@ class StereoDepth : public Node {
      * The mirroring is required to have a normal non-mirrored disparity/depth output.
      *
      * A side effect of this option is disparity alignment to the perspective of left or right input:
-     * - LR-check disabled: `false`: mapped to left and mirrored, `true`: mapped to right;
-     * - LR-check enabled: `false`: mapped to right, `true`: mapped to left, never mirrored.
+     * `false`: mapped to left and mirrored, `true`: mapped to right.
+     * With LR-check enabled, this option is ignored, none of the outputs are mirrored,
+     * and disparity is mapped to right.
      *
      * @param enable True for normal disparity/depth, otherwise mirrored
      */

From f899a93b442b8d63e58a8698513b14c77efed0bb Mon Sep 17 00:00:00 2001
From: alex-luxonis
Date: Mon, 24 May 2021 18:39:39 +0300
Subject: [PATCH 55/56] Comment out ImgFrame excess data warning for now,
 doesn't build on Windows

---
 src/opencv/ImgFrame.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/opencv/ImgFrame.cpp b/src/opencv/ImgFrame.cpp
index 7f3920f93..0dfe6d562 100644
--- a/src/opencv/ImgFrame.cpp
+++ b/src/opencv/ImgFrame.cpp
@@ -2,7 +2,7 @@
 
 #include 
 
-#include "spdlog/spdlog.h"
+// #include "spdlog/spdlog.h"
 
 namespace dai {
@@ -71,7 +71,8 @@ cv::Mat ImgFrame::getFrame(bool deepCopy) {
         throw std::runtime_error("ImgFrame doesn't have enough data to encode specified frame, required " + std::to_string(requiredSize) + ", actual "
                                  + std::to_string(actualSize) + ". Maybe metadataOnly transfer was made?");
     } else if(actualSize > requiredSize) {
-        spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize);
+        // FIXME doesn't build on Windows (multiple definitions during link)
+        // spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize);
     }
     if(getWidth() <= 0 || getHeight() <= 0) {
         throw std::runtime_error("ImgFrame metadata not valid (width or height = 0)");

From 925b805e2e16748df6f55a881abfe0c1d0d66f72 Mon Sep 17 00:00:00 2001
From: SzabolcsGergely
Date: Mon, 24 May 2021 20:36:19 +0300
Subject: [PATCH 56/56] Bump version to 2.4.0

---
 CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index ea0ae3be7..669541110 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,7 +26,7 @@ if(WIN32)
 endif()
 
 # Create depthai project
-project(depthai VERSION "2.3.0" LANGUAGES CXX C)
+project(depthai VERSION "2.4.0" LANGUAGES CXX C)
 get_directory_property(has_parent PARENT_DIRECTORY)
 if(has_parent)
   set(DEPTHAI_VERSION ${PROJECT_VERSION} PARENT_SCOPE)
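
Usage note (editorial addition, not part of the patches above): a minimal sketch of how the new MonoCamera `raw` output introduced in this series might be wired into a pipeline next to the regular `out` stream. The "raw" stream name, the LEFT board socket and the 400p resolution are illustrative assumptions, not taken from the patches; RAW10-packed frames need host-side unpacking before display.

    #include <iostream>
    #include "depthai/depthai.hpp"

    int main() {
        dai::Pipeline pipeline;

        // Mono camera producing both the ISP-processed 'out' and the sensor 'raw' stream
        auto mono = pipeline.create<dai::node::MonoCamera>();
        mono->setBoardSocket(dai::CameraBoardSocket::LEFT);
        mono->setResolution(dai::MonoCameraProperties::SensorResolution::THE_400_P);

        // Send the RAW10-packed frames to the host over XLink
        auto xoutRaw = pipeline.create<dai::node::XLinkOut>();
        xoutRaw->setStreamName("raw");  // stream name is an arbitrary choice
        mono->raw.link(xoutRaw->input);

        dai::Device device(pipeline);
        auto rawQueue = device.getOutputQueue("raw", 4, false);

        while(true) {
            auto frame = rawQueue->get<dai::ImgFrame>();
            // RAW10-packed data: 4 pixels are stored in 5 bytes, so the payload is
            // larger than width*height and must be unpacked before viewing
            std::cout << "raw frame " << frame->getWidth() << "x" << frame->getHeight()
                      << ", " << frame->getData().size() << " bytes" << std::endl;
        }
        return 0;
    }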