From 62db8e038450b9b65f3e4cd7414cf39b47347b4b Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Fri, 19 Feb 2021 09:17:16 -0700 Subject: [PATCH 01/40] Adding micropython. --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 7 +++++++ src/pipeline/PipelineBindings.cpp | 2 ++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 77d1bc720..a61b4d3b9 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 77d1bc720bf989cd2afaf4ddae012c593497b940 +Subproject commit a61b4d3b95bf3e641ed588061f29289ce76efc30 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index a1177bfde..268f7ed1d 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -12,6 +12,7 @@ #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" +#include "depthai/pipeline/node/Micropython.hpp" // Libraries #include "hedley/hedley.h" @@ -407,4 +408,10 @@ void NodeBindings::bind(pybind11::module& m){ + // Micropython node + py::class_>(m, "Micropython") + .def_readonly("input", &Micropython::input) + .def("setBlobPath", &Micropython::setBlobPath) + .def("setNumPoolFrames", &Micropython::setNumPoolFrames) + ; } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 306f30548..df198f907 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -14,6 +14,7 @@ #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" +#include "depthai/pipeline/node/Micropython.hpp" // depthai-shared #include "depthai-shared/pb/properties/GlobalProperties.hpp" @@ -65,6 +66,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createStereoDepth", &Pipeline::create) .def("createMobileNetDetectionNetwork", &Pipeline::create) .def("createYoloDetectionNetwork", &Pipeline::create) + .def("createMicropython", &Pipeline::create) ; From 423b72cea7768bfaaa20403fd7ee367ffdcdec0c Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 22 Feb 2021 01:03:54 +0100 Subject: [PATCH 02/40] Pipeline::create and dai.node submodule capability --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 87 ++++++++++++++++++++----------- src/pipeline/NodeBindings.hpp | 1 + src/pipeline/PipelineBindings.cpp | 30 +++++++++-- 4 files changed, 87 insertions(+), 33 deletions(-) diff --git a/depthai-core b/depthai-core index a1c1b310f..847a7f357 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit a1c1b310f3f6aba77f7492f6549151692d9cb74a +Subproject commit 847a7f357c8b719584cf63dd043c60ae18140e20 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 52fd24200..960a31bb3 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -1,5 +1,6 @@ #include "NodeBindings.hpp" +#include "depthai/pipeline/Pipeline.hpp" #include "depthai/pipeline/Node.hpp" #include "depthai/pipeline/node/XLinkIn.hpp" #include "depthai/pipeline/node/XLinkOut.hpp" @@ -12,16 +13,35 @@ #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" -#include "depthai/pipeline/node/Micropython.hpp" +#include "depthai/pipeline/node/MicroPython.hpp" // Libraries #include "hedley/hedley.h" +// Map of python node classes and call to pipeline to create it +std::vector(dai::Pipeline&)>>> pyNodeCreateMap; + 
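+// Each ADD_NODE(...) call below registers the bound class together with a factory
+// lambda in this map; PipelineBindings later walks the map so that a Python-side
+// pipeline.create(NodeClass) call can dispatch to the matching Pipeline::create<NodeType>().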
+py::handle daiNodeModule; +template +py::class_ addNode(const char* name){ + auto node = py::class_>(daiNodeModule, name); + pyNodeCreateMap.push_back(std::make_pair(node, [](dai::Pipeline& p){ + return p.create(); + })); + return node; +} + +#define ADD_NODE(NodeName) addNode(#NodeName) +#define ADD_NODE_DERIVED(NodeName, Derived) addNode(#NodeName) + + +std::vector(dai::Pipeline&)>>> NodeBindings::getNodeCreateMap(){ + return pyNodeCreateMap; +} + void NodeBindings::bind(pybind11::module& m){ using namespace dai; - using namespace dai::node; - // Base 'Node' class binding py::class_> pyNode(m, "Node"); @@ -67,10 +87,18 @@ void NodeBindings::bind(pybind11::module& m){ // .def_readwrite("inputName", &dai::Node::Connection::inputName) // ; + + //// Bindings for actual nodes + // Create "namespace" (python submodule) for nodes + using namespace dai::node; + daiNodeModule = m; + // TODO(themarpe) - move properties into nodes and nodes under 'node' submodule + //daiNodeModule = m.def_submodule("node"); + // XLinkIn node - py::class_>(m, "XLinkIn") + ADD_NODE(XLinkIn) .def_readonly("out", &XLinkIn::out) .def("setStreamName", &XLinkIn::setStreamName, py::arg("streamName")) .def("setMaxDataSize", &XLinkIn::setMaxDataSize, py::arg("maxDataSize")) @@ -81,7 +109,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // XLinkOut node - py::class_>(m, "XLinkOut") + ADD_NODE(XLinkOut) .def_readonly("input", &XLinkOut::input) .def("setStreamName", &XLinkOut::setStreamName, py::arg("streamName")) .def("setFpsLimit", &XLinkOut::setFpsLimit, py::arg("fpsLimit")) @@ -92,7 +120,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // ColorCamera node - py::class_>(m, "ColorCamera") + ADD_NODE(ColorCamera) .def_readonly("inputConfig", &ColorCamera::inputConfig) .def_readonly("inputControl", &ColorCamera::inputControl) .def_readonly("initialControl", &ColorCamera::initialControl) @@ -156,7 +184,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // NeuralNetwork node - py::class_>(m, "NeuralNetwork") + ADD_NODE(NeuralNetwork) .def_readonly("input", &NeuralNetwork::input) .def_readonly("out", &NeuralNetwork::out) .def_readonly("passthrough", &NeuralNetwork::passthrough) @@ -167,7 +195,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // ImageManip node - py::class_>(m, "ImageManip") + ADD_NODE(ImageManip) .def_readonly("inputConfig", &ImageManip::inputConfig) .def_readonly("inputImage", &ImageManip::inputImage) .def_readonly("out", &ImageManip::out) @@ -235,7 +263,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // MonoCamera node - py::class_>(m, "MonoCamera") + ADD_NODE(MonoCamera) .def_readonly("inputControl", &MonoCamera::inputControl) .def_readonly("out", &MonoCamera::out) .def_readonly("initialControl", &MonoCamera::initialControl) @@ -271,7 +299,7 @@ void NodeBindings::bind(pybind11::module& m){ // StereoDepth node - py::class_>(m, "StereoDepth") + ADD_NODE(StereoDepth) .def_readonly("left", &StereoDepth::left) .def_readonly("right", &StereoDepth::right) .def_readonly("depth", &StereoDepth::depth) @@ -296,7 +324,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // VideoEncoder node - py::class_>(m, "VideoEncoder") + ADD_NODE(VideoEncoder) .def_readonly("input", &VideoEncoder::input) .def_readonly("bitstream", &VideoEncoder::bitstream) .def("setDefaultProfilePreset", (void(VideoEncoder::*)(std::tuple, float, VideoEncoderProperties::Profile))&VideoEncoder::setDefaultProfilePreset) @@ -325,13 +353,14 @@ void NodeBindings::bind(pybind11::module& m){ ; // SPIOut node - py::class_>(m, "SPIOut") + 
ADD_NODE(SPIOut) .def_readonly("input", &SPIOut::input) .def("setStreamName", &SPIOut::setStreamName) .def("setBusId", &SPIOut::setBusId) ; - py::class_>(m, "DetectionNetwork") + // Cannot be created + py::class_>(daiNodeModule, "DetectionNetwork") .def_readonly("input", &DetectionNetwork::input) .def_readonly("out", &DetectionNetwork::out) .def_readonly("passthrough", &DetectionNetwork::passthrough) @@ -339,11 +368,10 @@ void NodeBindings::bind(pybind11::module& m){ ; // MobileNetDetectionNetwork node - py::class_>(m, "MobileNetDetectionNetwork") - ; + ADD_NODE_DERIVED(MobileNetDetectionNetwork, DetectionNetwork); // YoloDetectionNetwork node - py::class_>(m, "YoloDetectionNetwork") + ADD_NODE_DERIVED(YoloDetectionNetwork, DetectionNetwork) .def("setNumClasses", &YoloDetectionNetwork::setNumClasses) .def("setCoordinateSize", &YoloDetectionNetwork::setCoordinateSize) .def("setAnchors", &YoloDetectionNetwork::setAnchors) @@ -352,16 +380,24 @@ void NodeBindings::bind(pybind11::module& m){ ; // SystemLogger node - py::class_>(m, "SystemLogger") + ADD_NODE(SystemLogger) .def_readonly("out", &SystemLogger::out) .def("setRate", &SystemLogger::setRate) ; + // MicroPython node + ADD_NODE(MicroPython) + .def_readonly("input", &MicroPython::input) + .def("setScriptPath", &MicroPython::setScriptPath) + ; + + + //////////////////////////////////// // Node properties bindings //////////////////////////////////// - py::class_ colorCameraProperties(m, "ColorCameraProperties"); + py::class_ colorCameraProperties(daiNodeModule, "ColorCameraProperties"); colorCameraProperties .def_readwrite("initialControl", &ColorCameraProperties::initialControl) .def_readwrite("boardSocket", &ColorCameraProperties::boardSocket) @@ -392,7 +428,7 @@ void NodeBindings::bind(pybind11::module& m){ // MonoCamera props - py::class_ monoCameraProperties(m, "MonoCameraProperties"); + py::class_ monoCameraProperties(daiNodeModule, "MonoCameraProperties"); monoCameraProperties .def_readwrite("initialControl", &MonoCameraProperties::initialControl) .def_readwrite("boardSocket", &MonoCameraProperties::boardSocket) @@ -409,7 +445,7 @@ void NodeBindings::bind(pybind11::module& m){ // StereoDepth props - py::class_ stereoDepthProperties(m, "StereoDepthProperties"); + py::class_ stereoDepthProperties(daiNodeModule, "StereoDepthProperties"); stereoDepthProperties .def_readwrite("calibration", &StereoDepthProperties::calibration) .def_readwrite("median", &StereoDepthProperties::median) @@ -434,7 +470,7 @@ void NodeBindings::bind(pybind11::module& m){ // VideoEncoder props - py::class_ videoEncoderProperties(m, "VideoEncoderProperties"); + py::class_ videoEncoderProperties(daiNodeModule, "VideoEncoderProperties"); videoEncoderProperties .def_readwrite("bitrate", &VideoEncoderProperties::bitrate) .def_readwrite("keyframeFrequency", &VideoEncoderProperties::keyframeFrequency) @@ -461,16 +497,9 @@ void NodeBindings::bind(pybind11::module& m){ .value("VBR", VideoEncoderProperties::RateControlMode::VBR) ; - py::class_(m, "SystemLoggerProperties") + py::class_(daiNodeModule, "SystemLoggerProperties") .def_readwrite("rateHz", &SystemLoggerProperties::rateHz) ; - - // Micropython node - py::class_>(m, "Micropython") - .def_readonly("input", &Micropython::input) - .def("setBlobPath", &Micropython::setBlobPath) - .def("setNumPoolFrames", &Micropython::setNumPoolFrames) - ; } diff --git a/src/pipeline/NodeBindings.hpp b/src/pipeline/NodeBindings.hpp index 9149a0aad..48ab99c99 100644 --- a/src/pipeline/NodeBindings.hpp +++ 
b/src/pipeline/NodeBindings.hpp @@ -8,4 +8,5 @@ struct NodeBindings : public dai::Node { static void bind(pybind11::module& m); + static std::vector(dai::Pipeline&)>>> getNodeCreateMap(); }; diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index eebac81d0..15294d545 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -1,4 +1,6 @@ + #include "PipelineBindings.hpp" +#include "NodeBindings.hpp" // depthai #include "depthai/pipeline/Pipeline.hpp" @@ -14,11 +16,25 @@ #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" -#include "depthai/pipeline/node/Micropython.hpp" +#include "depthai/pipeline/node/MicroPython.hpp" // depthai-shared #include "depthai-shared/properties/GlobalProperties.hpp" + + +std::shared_ptr createNode(dai::Pipeline& p, py::object class_){ + auto nodeCreateMap = NodeBindings::getNodeCreateMap(); + for(auto& kv : nodeCreateMap){ + auto& node = kv.first; + auto& create = kv.second; + if(node.is(class_)){ + return create(p); + } + } + return nullptr; +} + void PipelineBindings::bind(pybind11::module& m){ using namespace dai; @@ -56,6 +72,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("setOpenVINOVersion", &Pipeline::setOpenVINOVersion, py::arg("version") = Pipeline::DEFAULT_OPENVINO_VERSION) + // TODO(themarpe), deprecate in favor of 'create' // templated create function .def("createXLinkIn", &Pipeline::create) .def("createXLinkOut", &Pipeline::create) @@ -68,8 +85,15 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createStereoDepth", &Pipeline::create) .def("createMobileNetDetectionNetwork", &Pipeline::create) .def("createYoloDetectionNetwork", &Pipeline::create) - .def("createMicropython", &Pipeline::create) + .def("createMicroPython", &Pipeline::create) + + .def("create", [](dai::Pipeline& p, py::object class_) { + auto node = createNode(p, class_); + if(node == nullptr){ + throw std::invalid_argument(std::string(py::str(class_)) + " is not a subclass of depthai.Node"); + } + return node; + }) ; - } From 0d5ecd3b3011bbf14624b264fa3bf6b71794c1a2 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 22 Feb 2021 12:55:43 +0100 Subject: [PATCH 03/40] Added Node::InputMap and Node::OutputMap --- depthai-core | 2 +- src/pipeline/CommonBindings.cpp | 6 +++ src/pipeline/NodeBindings.cpp | 88 ++++++++++++++++++++++++++++++++- 3 files changed, 93 insertions(+), 3 deletions(-) diff --git a/depthai-core b/depthai-core index 847a7f357..31aa576d8 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 847a7f357c8b719584cf63dd043c60ae18140e20 +Subproject commit 31aa576d88b4de011cb07322e44aaeadc4ff4ba7 diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 57c9e472a..5d1eb3236 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -6,6 +6,7 @@ #include "depthai-shared/common/MemoryInfo.hpp" #include "depthai-shared/common/ChipTemperature.hpp" #include "depthai-shared/common/CpuUsage.hpp" +#include "depthai-shared/common/ProcessorType.hpp" void CommonBindings::bind(pybind11::module& m){ @@ -53,4 +54,9 @@ void CommonBindings::bind(pybind11::module& m){ .def_readwrite("msTime", &CpuUsage::msTime) ; + // ProcessorType + py::enum_(m, "ProcessorType") + .value("LEON_CSS", ProcessorType::LEON_CSS) + .value("LEON_MSS", ProcessorType::LEON_MSS) + ; } diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 
960a31bb3..a086c7007 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -18,6 +18,9 @@ // Libraries #include "hedley/hedley.h" +// pybind11 +#include "pybind11/stl_bind.h" + // Map of python node classes and call to pipeline to create it std::vector(dai::Pipeline&)>>> pyNodeCreateMap; @@ -39,6 +42,79 @@ std::vector(dai:: return pyNodeCreateMap; } +// Bind map - without init method +template , typename... Args> +py::class_ bindNodeMap(py::handle scope, const std::string &name, Args&&... args) { + using namespace py; + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + using Class_ = class_; + + // If either type is a non-module-local bound type then make the map binding non-local as well; + // otherwise (e.g. both types are either module-local or converting) the map will be + // module-local. + auto tinfo = py::detail::get_type_info(typeid(MappedType)); + bool local = !tinfo || tinfo->module_local; + if (local) { + tinfo = py::detail::get_type_info(typeid(KeyType)); + local = !tinfo || tinfo->module_local; + } + + Class_ cl(scope, name.c_str(), pybind11::module_local(local), std::forward(args)...); + + // Register stream insertion operator (if possible) + detail::map_if_insertion_operator(cl, name); + + cl.def("__bool__", + [](const Map &m) -> bool { return !m.empty(); }, + "Check whether the map is nonempty" + ); + + cl.def("__iter__", + [](Map &m) { return make_key_iterator(m.begin(), m.end()); }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); + + cl.def("items", + [](Map &m) { return make_iterator(m.begin(), m.end()); }, + keep_alive<0, 1>() /* Essential: keep list alive while iterator exists */ + ); + + // Modified __getitem__. Uses operator[] underneath + cl.def("__getitem__", + [](Map &m, const KeyType &k) -> MappedType & { + return m[k]; + }, + return_value_policy::reference_internal // ref + keepalive + ); + + cl.def("__contains__", + [](Map &m, const KeyType &k) -> bool { + auto it = m.find(k); + if (it == m.end()) + return false; + return true; + } + ); + + // Assignment provided only if the type is copyable + detail::map_assignment(cl); + + cl.def("__delitem__", + [](Map &m, const KeyType &k) { + auto it = m.find(k); + if (it == m.end()) + throw key_error(); + m.erase(it); + } + ); + + cl.def("__len__", &Map::size); + + return cl; +} + + void NodeBindings::bind(pybind11::module& m){ using namespace dai; @@ -60,6 +136,7 @@ void NodeBindings::bind(pybind11::module& m){ .def("setQueueSize", &Node::Input::setQueueSize) .def("getQueueSize", &Node::Input::getQueueSize) ; + // Node::Output bindings py::class_(pyNode, "Output") .def("canConnect", &Node::Output::canConnect) @@ -87,6 +164,10 @@ void NodeBindings::bind(pybind11::module& m){ // .def_readwrite("inputName", &dai::Node::Connection::inputName) // ; + // Node::InputMap bindings + bindNodeMap(pyNode, "InputMap"); + // Node::OutputMap bindings + bindNodeMap(pyNode, "OutputMap"); //// Bindings for actual nodes @@ -96,7 +177,6 @@ void NodeBindings::bind(pybind11::module& m){ // TODO(themarpe) - move properties into nodes and nodes under 'node' submodule //daiNodeModule = m.def_submodule("node"); - // XLinkIn node ADD_NODE(XLinkIn) .def_readonly("out", &XLinkIn::out) @@ -387,8 +467,12 @@ void NodeBindings::bind(pybind11::module& m){ // MicroPython node ADD_NODE(MicroPython) - .def_readonly("input", &MicroPython::input) + .def_readonly("inputs", &MicroPython::inputs) + .def_readonly("outputs", &MicroPython::outputs) 
.def("setScriptPath", &MicroPython::setScriptPath) + .def("getScriptPath", &MicroPython::getScriptPath) + .def("setProcessor", &MicroPython::setProcessor) + .def("getProcessor", &MicroPython::getProcessor) ; From 57fdd1c223480d1d5eb8964888d9eaf530c23c48 Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Tue, 30 Mar 2021 17:02:56 -0600 Subject: [PATCH 04/40] Checking in micropython asset changes. --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 31aa576d8..e1762f4cd 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 31aa576d88b4de011cb07322e44aaeadc4ff4ba7 +Subproject commit e1762f4cdb72a9a03fa66a66ed0333957b6b3d73 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index a086c7007..a62547c99 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -469,8 +469,10 @@ void NodeBindings::bind(pybind11::module& m){ ADD_NODE(MicroPython) .def_readonly("inputs", &MicroPython::inputs) .def_readonly("outputs", &MicroPython::outputs) + .def("setName", &MicroPython::setName) .def("setScriptPath", &MicroPython::setScriptPath) .def("getScriptPath", &MicroPython::getScriptPath) + .def("addAsset", &MicroPython::addAsset) .def("setProcessor", &MicroPython::setProcessor) .def("getProcessor", &MicroPython::getProcessor) ; From 5a59aae2b4848951507c5f022ca6f79859b19d0d Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Wed, 31 Mar 2021 11:16:03 -0600 Subject: [PATCH 05/40] Adding load method for assetManager. --- src/pipeline/AssetManagerBindings.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pipeline/AssetManagerBindings.cpp b/src/pipeline/AssetManagerBindings.cpp index 71daa0578..37570c387 100644 --- a/src/pipeline/AssetManagerBindings.cpp +++ b/src/pipeline/AssetManagerBindings.cpp @@ -30,7 +30,10 @@ void AssetManagerBindings::bind(pybind11::module& m){ .def("add", static_cast(&AssetManager::add), py::arg("asset")) .def("add", static_cast(&AssetManager::add), py::arg("key"), py::arg("asset")) .def("addExisting", &AssetManager::addExisting) - .def("set", &AssetManager::set) + .def("set", &AssetManager::set) + .def("load", [](AssetManager& am, const std::string& key, const std::string& path){ + am.load(key, path); + }) .def("get", static_cast (AssetManager::*)(const std::string&) const>(&AssetManager::get)) .def("get", static_cast (AssetManager::*)(const std::string&)>(&AssetManager::get)) .def("getAll", static_cast> (AssetManager::*)() const>(&AssetManager::getAll)) @@ -39,4 +42,4 @@ void AssetManagerBindings::bind(pybind11::module& m){ .def("remove", &AssetManager::remove) ; -} \ No newline at end of file +} From f47c9541cfcf03bb821b055629f08c48bc5bd5a7 Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Wed, 31 Mar 2021 16:11:57 -0600 Subject: [PATCH 06/40] Renaming MicroPython node to LxScript. 
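The MicroPython node and its pipeline factory are renamed to LxScript; the
input/output maps and the setScriptPath/setProcessor API are otherwise unchanged.

A rough sketch of the user-facing effect, assuming the names bound in this
patch series (the script path below is a placeholder):

    import depthai as dai

    pipeline = dai.Pipeline()
    # previously: script = pipeline.createMicroPython()
    script = pipeline.createLxScript()
    script.setScriptPath('camera_control.py')        # placeholder path
    script.setProcessor(dai.ProcessorType.LEON_CSS)  # run the script on the CSS Leon core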
--- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 22 +++++++++++----------- src/pipeline/PipelineBindings.cpp | 4 ++-- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/depthai-core b/depthai-core index e1762f4cd..d3d6d166b 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit e1762f4cdb72a9a03fa66a66ed0333957b6b3d73 +Subproject commit d3d6d166b6fb3ea42b4393e25e24b40438ccf56d diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index a62547c99..045c35f34 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -13,7 +13,7 @@ #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" -#include "depthai/pipeline/node/MicroPython.hpp" +#include "depthai/pipeline/node/LxScript.hpp" // Libraries #include "hedley/hedley.h" @@ -465,16 +465,16 @@ void NodeBindings::bind(pybind11::module& m){ .def("setRate", &SystemLogger::setRate) ; - // MicroPython node - ADD_NODE(MicroPython) - .def_readonly("inputs", &MicroPython::inputs) - .def_readonly("outputs", &MicroPython::outputs) - .def("setName", &MicroPython::setName) - .def("setScriptPath", &MicroPython::setScriptPath) - .def("getScriptPath", &MicroPython::getScriptPath) - .def("addAsset", &MicroPython::addAsset) - .def("setProcessor", &MicroPython::setProcessor) - .def("getProcessor", &MicroPython::getProcessor) + // LxScript node + ADD_NODE(LxScript) + .def_readonly("inputs", &LxScript::inputs) + .def_readonly("outputs", &LxScript::outputs) + .def("setName", &LxScript::setName) + .def("setScriptPath", &LxScript::setScriptPath) + .def("getScriptPath", &LxScript::getScriptPath) + .def("addAsset", &LxScript::addAsset) + .def("setProcessor", &LxScript::setProcessor) + .def("getProcessor", &LxScript::getProcessor) ; diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 15294d545..291851c85 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -16,7 +16,7 @@ #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" -#include "depthai/pipeline/node/MicroPython.hpp" +#include "depthai/pipeline/node/LxScript.hpp" // depthai-shared #include "depthai-shared/properties/GlobalProperties.hpp" @@ -85,7 +85,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createStereoDepth", &Pipeline::create) .def("createMobileNetDetectionNetwork", &Pipeline::create) .def("createYoloDetectionNetwork", &Pipeline::create) - .def("createMicroPython", &Pipeline::create) + .def("createLxScript", &Pipeline::create) .def("create", [](dai::Pipeline& p, py::object class_) { auto node = createNode(p, class_); From 49112fbf118878b9d6c53e7ada86ca8d8f91ca51 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sat, 3 Apr 2021 03:27:33 +0200 Subject: [PATCH 07/40] Improved documentation resolvment and 'pipeline.create' --- src/pipeline/NodeBindings.cpp | 370 ++++++++++++++++-------------- src/pipeline/NodeBindings.hpp | 2 +- src/pipeline/PipelineBindings.cpp | 4 +- src/py_bindings.cpp | 10 +- 4 files changed, 202 insertions(+), 184 deletions(-) diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index ad4dfe10b..11b903687 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -24,35 +24,38 @@ #include "pybind11/stl_bind.h" // Map of python node classes and call to pipeline to create it 
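+// The factory lambdas now also receive the Python class object, so abstract bases
+// registered with ADD_NODE_ABSTRACT below can raise a descriptive 'abstract node'
+// error when passed to Pipeline.create.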
-std::vector(dai::Pipeline&)>>> pyNodeCreateMap; +std::vector(dai::Pipeline&, py::object class_)>>> pyNodeCreateMap; py::handle daiNodeModule; template -py::class_ addNode(const char* name){ - auto node = py::class_>(daiNodeModule, name); - pyNodeCreateMap.push_back(std::make_pair(node, [](dai::Pipeline& p){ +py::class_ addNode(const char* name, const char* docstring = nullptr){ + auto node = py::class_>(daiNodeModule, name, docstring); + pyNodeCreateMap.push_back(std::make_pair(node, [](dai::Pipeline& p, py::object class_){ return p.create(); })); return node; } template -py::class_ addNode(const char* name, const char* docstring){ +py::class_ addNodeAbstract(const char* name, const char* docstring = nullptr){ auto node = py::class_>(daiNodeModule, name, docstring); - pyNodeCreateMap.push_back(std::make_pair(node, [](dai::Pipeline& p){ - return p.create(); + pyNodeCreateMap.push_back(std::make_pair(node, [](dai::Pipeline& p, py::object class_) -> std::shared_ptr { + throw std::invalid_argument(std::string(py::str(class_)) + " is an abstract node. Choose an appropriate derived node instead"); + return nullptr; })); return node; } -std::vector(dai::Pipeline&)>>> NodeBindings::getNodeCreateMap(){ +std::vector(dai::Pipeline&, py::object class_)>>> NodeBindings::getNodeCreateMap(){ return pyNodeCreateMap; } // Macro helpers #define ADD_NODE(NodeName) addNode(#NodeName, DOC(dai, node, NodeName)) #define ADD_NODE_DERIVED(NodeName, Derived) addNode(#NodeName, DOC(dai, node, NodeName)) +#define ADD_NODE_ABSTRACT(NodeName) addNodeAbstract(#NodeName, DOC(dai, node, NodeName)) +#define ADD_NODE_DERIVED_ABSTRACT(NodeName, Derived) addNodeAbstract(#NodeName, DOC(dai, node, NodeName)) #define ADD_NODE_DOC(NodeName, docstring) addNode(#NodeName, docstring) #define ADD_NODE_DERIVED_DOC(NodeName, Derived, docstring) addNode(#NodeName, docstring) @@ -134,6 +137,165 @@ void NodeBindings::bind(pybind11::module& m){ using namespace dai; + + + //////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////////////////// + // Node properties bindings first - so function params are resolved + //////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////////////////// + + py::class_ colorCameraProperties(m, "ColorCameraProperties", DOC(dai, ColorCameraProperties)); + + py::enum_(colorCameraProperties, "SensorResolution", DOC(dai, ColorCameraProperties, SensorResolution)) + .value("THE_1080_P", ColorCameraProperties::SensorResolution::THE_1080_P) + .value("THE_4_K", ColorCameraProperties::SensorResolution::THE_4_K) + .value("THE_12_MP", ColorCameraProperties::SensorResolution::THE_12_MP) + ; + + py::enum_(colorCameraProperties, "ColorOrder", DOC(dai, ColorCameraProperties, ColorOrder)) + .value("BGR", ColorCameraProperties::ColorOrder::BGR) + .value("RGB", ColorCameraProperties::ColorOrder::RGB) + ; + + colorCameraProperties + .def_readwrite("initialControl", &ColorCameraProperties::initialControl) + .def_readwrite("boardSocket", &ColorCameraProperties::boardSocket) + .def_readwrite("colorOrder", &ColorCameraProperties::colorOrder) + .def_readwrite("interleaved", &ColorCameraProperties::interleaved) + .def_readwrite("previewHeight", &ColorCameraProperties::previewHeight) + .def_readwrite("previewWidth", &ColorCameraProperties::previewWidth) + .def_readwrite("videoHeight", &ColorCameraProperties::videoHeight) + 
.def_readwrite("videoWidth", &ColorCameraProperties::videoWidth) + .def_readwrite("stillHeight", &ColorCameraProperties::stillHeight) + .def_readwrite("stillWidth", &ColorCameraProperties::stillWidth) + .def_readwrite("resolution", &ColorCameraProperties::resolution) + .def_readwrite("fps", &ColorCameraProperties::fps) + .def_readwrite("sensorCropX", &ColorCameraProperties::sensorCropX) + .def_readwrite("sensorCropY", &ColorCameraProperties::sensorCropY) + ; + + + + // MonoCamera props + py::class_ monoCameraProperties(m, "MonoCameraProperties", DOC(dai, MonoCameraProperties)); + + py::enum_(monoCameraProperties, "SensorResolution", DOC(dai, MonoCameraProperties, SensorResolution)) + .value("THE_720_P", MonoCameraProperties::SensorResolution::THE_720_P) + .value("THE_800_P", MonoCameraProperties::SensorResolution::THE_800_P) + .value("THE_400_P", MonoCameraProperties::SensorResolution::THE_400_P) + ; + + monoCameraProperties + .def_readwrite("initialControl", &MonoCameraProperties::initialControl) + .def_readwrite("boardSocket", &MonoCameraProperties::boardSocket) + .def_readwrite("resolution", &MonoCameraProperties::resolution) + .def_readwrite("fps", &MonoCameraProperties::fps) + ; + + + // StereoDepth props + py::class_ stereoDepthProperties(m, "StereoDepthProperties", DOC(dai, StereoDepthProperties)); + + py::enum_(stereoDepthProperties, "MedianFilter", DOC(dai, StereoDepthProperties, MedianFilter)) + .value("MEDIAN_OFF", StereoDepthProperties::MedianFilter::MEDIAN_OFF) + .value("KERNEL_3x3", StereoDepthProperties::MedianFilter::KERNEL_3x3) + .value("KERNEL_5x5", StereoDepthProperties::MedianFilter::KERNEL_5x5) + .value("KERNEL_7x7", StereoDepthProperties::MedianFilter::KERNEL_7x7) + ; + + stereoDepthProperties + .def_readwrite("calibration", &StereoDepthProperties::calibration) + .def_readwrite("median", &StereoDepthProperties::median) + .def_readwrite("confidenceThreshold", &StereoDepthProperties::confidenceThreshold) + .def_readwrite("enableLeftRightCheck", &StereoDepthProperties::enableLeftRightCheck) + .def_readwrite("enableSubpixel", &StereoDepthProperties::enableSubpixel) + .def_readwrite("enableExtendedDisparity", &StereoDepthProperties::enableExtendedDisparity) + .def_readwrite("rectifyMirrorFrame", &StereoDepthProperties::rectifyMirrorFrame) + .def_readwrite("rectifyEdgeFillColor", &StereoDepthProperties::rectifyEdgeFillColor) + .def_readwrite("enableOutputRectified", &StereoDepthProperties::enableOutputRectified) + .def_readwrite("enableOutputDepth", &StereoDepthProperties::enableOutputDepth) + .def_readwrite("width", &StereoDepthProperties::width) + .def_readwrite("height", &StereoDepthProperties::height) + ; + + + // VideoEncoder props + py::class_ videoEncoderProperties(m, "VideoEncoderProperties", DOC(dai, VideoEncoderProperties)); + + py::enum_(videoEncoderProperties, "Profile", DOC(dai, VideoEncoderProperties, Profile)) + .value("H264_BASELINE", VideoEncoderProperties::Profile::H264_BASELINE) + .value("H264_HIGH", VideoEncoderProperties::Profile::H264_HIGH) + .value("H264_MAIN", VideoEncoderProperties::Profile::H264_MAIN) + .value("H265_MAIN", VideoEncoderProperties::Profile::H265_MAIN) + .value("MJPEG", VideoEncoderProperties::Profile::MJPEG) + ; + + py::enum_(videoEncoderProperties, "RateControlMode", DOC(dai, VideoEncoderProperties, RateControlMode)) + .value("CBR", VideoEncoderProperties::RateControlMode::CBR) + .value("VBR", VideoEncoderProperties::RateControlMode::VBR) + ; + + videoEncoderProperties + .def_readwrite("bitrate", &VideoEncoderProperties::bitrate) + 
.def_readwrite("keyframeFrequency", &VideoEncoderProperties::keyframeFrequency) + .def_readwrite("maxBitrate", &VideoEncoderProperties::maxBitrate) + .def_readwrite("numBFrames", &VideoEncoderProperties::numBFrames) + .def_readwrite("numFramesPool", &VideoEncoderProperties::numFramesPool) + .def_readwrite("profile", &VideoEncoderProperties::profile) + .def_readwrite("quality", &VideoEncoderProperties::quality) + .def_readwrite("rateCtrlMode", &VideoEncoderProperties::rateCtrlMode) + .def_readwrite("width", &VideoEncoderProperties::width) + .def_readwrite("height", &VideoEncoderProperties::height) + ; + + + // System logger + py::class_(m, "SystemLoggerProperties", DOC(dai, SystemLoggerProperties)) + .def_readwrite("rateHz", &SystemLoggerProperties::rateHz) + ; + + py::class_> neuralNetworkProperties(m, "NeuralNetworkProperties", DOC(dai, NeuralNetworkProperties)); + neuralNetworkProperties + .def_readwrite("blobSize", &NeuralNetworkProperties::blobSize) + .def_readwrite("blobUri", &NeuralNetworkProperties::blobUri) + .def_readwrite("numFrames", &NeuralNetworkProperties::numFrames) + .def_readwrite("numThreads", &NeuralNetworkProperties::numThreads) + .def_readwrite("numNCEPerThread", &NeuralNetworkProperties::numNCEPerThread) + ; + + + py::class_> detectionNetworkProperties(m, "DetectionNetworkProperties", DOC(dai, DetectionNetworkProperties)); + detectionNetworkProperties + .def_readwrite("nnFamily", &DetectionNetworkProperties::nnFamily) + .def_readwrite("confidenceThreshold", &DetectionNetworkProperties::confidenceThreshold) + .def_readwrite("classes", &DetectionNetworkProperties::classes) + .def_readwrite("coordinates", &DetectionNetworkProperties::coordinates) + .def_readwrite("anchors", &DetectionNetworkProperties::anchors) + .def_readwrite("anchorMasks", &DetectionNetworkProperties::anchorMasks) + .def_readwrite("iouThreshold", &DetectionNetworkProperties::iouThreshold) + ; + + + py::class_> spatialDetectionNetworkProperties(m, "SpatialDetectionNetworkProperties", DOC(dai, SpatialDetectionNetworkProperties)); + spatialDetectionNetworkProperties + .def_readwrite("detectedBBScaleFactor", &SpatialDetectionNetworkProperties::detectedBBScaleFactor) + .def_readwrite("depthThresholds", &SpatialDetectionNetworkProperties::depthThresholds) + ; + + py::class_ spatialLocationCalculatorProperties(m, "SpatialLocationCalculatorProperties", DOC(dai, SpatialLocationCalculatorProperties)); + spatialLocationCalculatorProperties + .def_readwrite("roiConfig", &SpatialLocationCalculatorProperties::roiConfig) + .def_readwrite("inputConfigSync", &SpatialLocationCalculatorProperties::inputConfigSync) + ; + + + //////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////////////////// + // Node Bindings after properties, so types are resolved + //////////////////////////////////////////////////////////////////////////////////////// + //////////////////////////////////////////////////////////////////////////////////////// + // Base 'Node' class binding py::class_> pyNode(m, "Node", DOC(dai, Node)); pyNode @@ -277,7 +439,9 @@ void NodeBindings::bind(pybind11::module& m){ .def("setPreviewKeepAspectRatio", &ColorCamera::setPreviewKeepAspectRatio, py::arg("keep"), DOC(dai, node, ColorCamera, setPreviewKeepAspectRatio)) .def("getPreviewKeepAspectRatio", &ColorCamera::getPreviewKeepAspectRatio, DOC(dai, node, ColorCamera, getPreviewKeepAspectRatio)) ; - + // ALIAS + 
daiNodeModule.attr("ColorCamera").attr("Properties") = colorCameraProperties; + // NeuralNetwork node @@ -291,6 +455,8 @@ void NodeBindings::bind(pybind11::module& m){ .def("setNumNCEPerInferenceThread", &NeuralNetwork::setNumNCEPerInferenceThread, py::arg("numNCEPerThread"), DOC(dai, node, NeuralNetwork, setNumNCEPerInferenceThread)) .def("getNumInferenceThreads", &NeuralNetwork::getNumInferenceThreads, DOC(dai, node, NeuralNetwork, getNumInferenceThreads)) ; + // Properties alias + daiNodeModule.attr("NeuralNetwork").attr("Properties") = neuralNetworkProperties; // ImageManip node @@ -394,7 +560,9 @@ void NodeBindings::bind(pybind11::module& m){ .def("getResolutionWidth", &MonoCamera::getResolutionWidth, DOC(dai, node, MonoCamera, getResolutionWidth)) .def("getResolutionHeight", &MonoCamera::getResolutionHeight, DOC(dai, node, MonoCamera, getResolutionHeight)) ; - + // ALIAS + daiNodeModule.attr("MonoCamera").attr("Properties") = monoCameraProperties; + // StereoDepth node ADD_NODE(StereoDepth) @@ -420,7 +588,9 @@ void NodeBindings::bind(pybind11::module& m){ .def("setOutputRectified", &StereoDepth::setOutputRectified, py::arg("enable"), DOC(dai, node, StereoDepth, setOutputRectified)) .def("setOutputDepth", &StereoDepth::setOutputDepth, py::arg("enable"), DOC(dai, node, StereoDepth, setOutputDepth)) ; - + // ALIAS + daiNodeModule.attr("StereoDepth").attr("Properties") = stereoDepthProperties; + // VideoEncoder node ADD_NODE(VideoEncoder) .def_readonly("input", &VideoEncoder::input, DOC(dai, node, VideoEncoder, input), DOC(dai, node, VideoEncoder, input)) @@ -451,6 +621,9 @@ void NodeBindings::bind(pybind11::module& m){ .def("getFrameRate", &VideoEncoder::getFrameRate, DOC(dai, node, VideoEncoder, getFrameRate)) .def("getSize", &VideoEncoder::getSize, DOC(dai, node, VideoEncoder, getSize)) ; + // ALIAS + daiNodeModule.attr("VideoEncoder").attr("Properties") = videoEncoderProperties; + ADD_NODE(SPIOut) .def_readonly("input", &SPIOut::input, DOC(dai, node, SPIOut, input)) @@ -458,12 +631,15 @@ void NodeBindings::bind(pybind11::module& m){ .def("setBusId", &SPIOut::setBusId, py::arg("id"), DOC(dai, node, SPIOut, setBusId)) ; - py::class_>(daiNodeModule, "DetectionNetwork", DOC(dai, node, DetectionNetwork)) + ADD_NODE_DERIVED_ABSTRACT(DetectionNetwork, NeuralNetwork) .def_readonly("input", &DetectionNetwork::input, DOC(dai, node, DetectionNetwork, input)) .def_readonly("out", &DetectionNetwork::out, DOC(dai, node, DetectionNetwork, out)) .def_readonly("passthrough", &DetectionNetwork::passthrough, DOC(dai, node, DetectionNetwork, passthrough)) .def("setConfidenceThreshold", &DetectionNetwork::setConfidenceThreshold, py::arg("thresh"), DOC(dai, node, DetectionNetwork, setConfidenceThreshold)) ; + // ALIAS + daiNodeModule.attr("DetectionNetwork").attr("Properties") = detectionNetworkProperties; + // MobileNetDetectionNetwork node ADD_NODE_DERIVED(MobileNetDetectionNetwork, DetectionNetwork) @@ -478,7 +654,7 @@ void NodeBindings::bind(pybind11::module& m){ .def("setIouThreshold", &YoloDetectionNetwork::setIouThreshold, py::arg("thresh"), DOC(dai, node, YoloDetectionNetwork, setIouThreshold)) ; - py::class_>(daiNodeModule, "SpatialDetectionNetwork", DOC(dai, node, SpatialDetectionNetwork)) + ADD_NODE_DERIVED_ABSTRACT(SpatialDetectionNetwork, DetectionNetwork) .def_readonly("input", &SpatialDetectionNetwork::input, DOC(dai, node, SpatialDetectionNetwork, input)) .def_readonly("inputDepth", &SpatialDetectionNetwork::inputDepth, DOC(dai, node, SpatialDetectionNetwork, inputDepth)) 
.def_readonly("out", &SpatialDetectionNetwork::out, DOC(dai, node, SpatialDetectionNetwork, out)) @@ -490,6 +666,8 @@ void NodeBindings::bind(pybind11::module& m){ .def("setDepthLowerThreshold", &SpatialDetectionNetwork::setDepthLowerThreshold, py::arg("lowerThreshold"), DOC(dai, node, SpatialDetectionNetwork, setDepthLowerThreshold)) .def("setDepthUpperThreshold", &SpatialDetectionNetwork::setDepthUpperThreshold, py::arg("upperThreshold"), DOC(dai, node, SpatialDetectionNetwork, setDepthUpperThreshold)) ; + // ALIAS + daiNodeModule.attr("SpatialDetectionNetwork").attr("Properties") = spatialDetectionNetworkProperties; // MobileNetSpatialDetectionNetwork ADD_NODE_DERIVED(MobileNetSpatialDetectionNetwork, SpatialDetectionNetwork) @@ -513,6 +691,8 @@ void NodeBindings::bind(pybind11::module& m){ .def_readonly("initialConfig", &SpatialLocationCalculator::initialConfig, DOC(dai, node, SpatialLocationCalculator, initialConfig)) .def("setWaitForConfigInput", &SpatialLocationCalculator::setWaitForConfigInput, py::arg("wait"), DOC(dai, node, SpatialLocationCalculator, setWaitForConfigInput)) ; + // ALIAS + daiNodeModule.attr("SpatialLocationCalculator").attr("Properties") = spatialLocationCalculatorProperties; // SystemLogger node ADD_NODE(SystemLogger) @@ -531,167 +711,5 @@ void NodeBindings::bind(pybind11::module& m){ .def("getProcessor", &LxScript::getProcessor) ; - - - - //////////////////////////////////// - // Node properties bindings - //////////////////////////////////// - py::class_ colorCameraProperties(m, "ColorCameraProperties", DOC(dai, ColorCameraProperties)); - - py::enum_(colorCameraProperties, "SensorResolution", DOC(dai, ColorCameraProperties, SensorResolution)) - .value("THE_1080_P", ColorCameraProperties::SensorResolution::THE_1080_P) - .value("THE_4_K", ColorCameraProperties::SensorResolution::THE_4_K) - .value("THE_12_MP", ColorCameraProperties::SensorResolution::THE_12_MP) - ; - - py::enum_(colorCameraProperties, "ColorOrder", DOC(dai, ColorCameraProperties, ColorOrder)) - .value("BGR", ColorCameraProperties::ColorOrder::BGR) - .value("RGB", ColorCameraProperties::ColorOrder::RGB) - ; - - colorCameraProperties - .def_readwrite("initialControl", &ColorCameraProperties::initialControl) - .def_readwrite("boardSocket", &ColorCameraProperties::boardSocket) - .def_readwrite("colorOrder", &ColorCameraProperties::colorOrder) - .def_readwrite("interleaved", &ColorCameraProperties::interleaved) - .def_readwrite("previewHeight", &ColorCameraProperties::previewHeight) - .def_readwrite("previewWidth", &ColorCameraProperties::previewWidth) - .def_readwrite("videoHeight", &ColorCameraProperties::videoHeight) - .def_readwrite("videoWidth", &ColorCameraProperties::videoWidth) - .def_readwrite("stillHeight", &ColorCameraProperties::stillHeight) - .def_readwrite("stillWidth", &ColorCameraProperties::stillWidth) - .def_readwrite("resolution", &ColorCameraProperties::resolution) - .def_readwrite("fps", &ColorCameraProperties::fps) - .def_readwrite("sensorCropX", &ColorCameraProperties::sensorCropX) - .def_readwrite("sensorCropY", &ColorCameraProperties::sensorCropY) - ; - // ALIAS - m.attr("ColorCamera").attr("Properties") = colorCameraProperties; - - - - // MonoCamera props - py::class_ monoCameraProperties(m, "MonoCameraProperties", DOC(dai, MonoCameraProperties)); - - py::enum_(monoCameraProperties, "SensorResolution", DOC(dai, MonoCameraProperties, SensorResolution)) - .value("THE_720_P", MonoCameraProperties::SensorResolution::THE_720_P) - .value("THE_800_P", 
MonoCameraProperties::SensorResolution::THE_800_P) - .value("THE_400_P", MonoCameraProperties::SensorResolution::THE_400_P) - ; - - monoCameraProperties - .def_readwrite("initialControl", &MonoCameraProperties::initialControl) - .def_readwrite("boardSocket", &MonoCameraProperties::boardSocket) - .def_readwrite("resolution", &MonoCameraProperties::resolution) - .def_readwrite("fps", &MonoCameraProperties::fps) - ; - // ALIAS - m.attr("MonoCamera").attr("Properties") = monoCameraProperties; - - - // StereoDepth props - py::class_ stereoDepthProperties(m, "StereoDepthProperties", DOC(dai, StereoDepthProperties)); - - py::enum_(stereoDepthProperties, "MedianFilter", DOC(dai, StereoDepthProperties, MedianFilter)) - .value("MEDIAN_OFF", StereoDepthProperties::MedianFilter::MEDIAN_OFF) - .value("KERNEL_3x3", StereoDepthProperties::MedianFilter::KERNEL_3x3) - .value("KERNEL_5x5", StereoDepthProperties::MedianFilter::KERNEL_5x5) - .value("KERNEL_7x7", StereoDepthProperties::MedianFilter::KERNEL_7x7) - ; - - stereoDepthProperties - .def_readwrite("calibration", &StereoDepthProperties::calibration) - .def_readwrite("median", &StereoDepthProperties::median) - .def_readwrite("confidenceThreshold", &StereoDepthProperties::confidenceThreshold) - .def_readwrite("enableLeftRightCheck", &StereoDepthProperties::enableLeftRightCheck) - .def_readwrite("enableSubpixel", &StereoDepthProperties::enableSubpixel) - .def_readwrite("enableExtendedDisparity", &StereoDepthProperties::enableExtendedDisparity) - .def_readwrite("rectifyMirrorFrame", &StereoDepthProperties::rectifyMirrorFrame) - .def_readwrite("rectifyEdgeFillColor", &StereoDepthProperties::rectifyEdgeFillColor) - .def_readwrite("enableOutputRectified", &StereoDepthProperties::enableOutputRectified) - .def_readwrite("enableOutputDepth", &StereoDepthProperties::enableOutputDepth) - .def_readwrite("width", &StereoDepthProperties::width) - .def_readwrite("height", &StereoDepthProperties::height) - ; - // ALIAS - m.attr("StereoDepth").attr("Properties") = stereoDepthProperties; - - // VideoEncoder props - py::class_ videoEncoderProperties(m, "VideoEncoderProperties", DOC(dai, VideoEncoderProperties)); - - py::enum_(videoEncoderProperties, "Profile", DOC(dai, VideoEncoderProperties, Profile)) - .value("H264_BASELINE", VideoEncoderProperties::Profile::H264_BASELINE) - .value("H264_HIGH", VideoEncoderProperties::Profile::H264_HIGH) - .value("H264_MAIN", VideoEncoderProperties::Profile::H264_MAIN) - .value("H265_MAIN", VideoEncoderProperties::Profile::H265_MAIN) - .value("MJPEG", VideoEncoderProperties::Profile::MJPEG) - ; - - py::enum_(videoEncoderProperties, "RateControlMode", DOC(dai, VideoEncoderProperties, RateControlMode)) - .value("CBR", VideoEncoderProperties::RateControlMode::CBR) - .value("VBR", VideoEncoderProperties::RateControlMode::VBR) - ; - - videoEncoderProperties - .def_readwrite("bitrate", &VideoEncoderProperties::bitrate) - .def_readwrite("keyframeFrequency", &VideoEncoderProperties::keyframeFrequency) - .def_readwrite("maxBitrate", &VideoEncoderProperties::maxBitrate) - .def_readwrite("numBFrames", &VideoEncoderProperties::numBFrames) - .def_readwrite("numFramesPool", &VideoEncoderProperties::numFramesPool) - .def_readwrite("profile", &VideoEncoderProperties::profile) - .def_readwrite("quality", &VideoEncoderProperties::quality) - .def_readwrite("rateCtrlMode", &VideoEncoderProperties::rateCtrlMode) - .def_readwrite("width", &VideoEncoderProperties::width) - .def_readwrite("height", &VideoEncoderProperties::height) - ; - // ALIAS - 
m.attr("VideoEncoder").attr("Properties") = videoEncoderProperties; - - // System logger - py::class_(m, "SystemLoggerProperties", DOC(dai, SystemLoggerProperties)) - .def_readwrite("rateHz", &SystemLoggerProperties::rateHz) - ; - - py::class_> neuralNetworkProperties(m, "NeuralNetworkProperties", DOC(dai, NeuralNetworkProperties)); - neuralNetworkProperties - .def_readwrite("blobSize", &NeuralNetworkProperties::blobSize) - .def_readwrite("blobUri", &NeuralNetworkProperties::blobUri) - .def_readwrite("numFrames", &NeuralNetworkProperties::numFrames) - .def_readwrite("numThreads", &NeuralNetworkProperties::numThreads) - .def_readwrite("numNCEPerThread", &NeuralNetworkProperties::numNCEPerThread) - ; - m.attr("NeuralNetwork").attr("Properties") = neuralNetworkProperties; - - - py::class_> detectionNetworkProperties(m, "DetectionNetworkProperties", DOC(dai, DetectionNetworkProperties)); - detectionNetworkProperties - .def_readwrite("nnFamily", &DetectionNetworkProperties::nnFamily) - .def_readwrite("confidenceThreshold", &DetectionNetworkProperties::confidenceThreshold) - .def_readwrite("classes", &DetectionNetworkProperties::classes) - .def_readwrite("coordinates", &DetectionNetworkProperties::coordinates) - .def_readwrite("anchors", &DetectionNetworkProperties::anchors) - .def_readwrite("anchorMasks", &DetectionNetworkProperties::anchorMasks) - .def_readwrite("iouThreshold", &DetectionNetworkProperties::iouThreshold) - ; - // ALIAS - m.attr("DetectionNetwork").attr("Properties") = detectionNetworkProperties; - - - py::class_> spatialDetectionNetworkProperties(m, "SpatialDetectionNetworkProperties", DOC(dai, SpatialDetectionNetworkProperties)); - spatialDetectionNetworkProperties - .def_readwrite("detectedBBScaleFactor", &SpatialDetectionNetworkProperties::detectedBBScaleFactor) - .def_readwrite("depthThresholds", &SpatialDetectionNetworkProperties::depthThresholds) - ; - // ALIAS - m.attr("SpatialDetectionNetwork").attr("Properties") = spatialDetectionNetworkProperties; - - py::class_ spatialLocationCalculatorProperties(m, "SpatialLocationCalculatorProperties", DOC(dai, SpatialLocationCalculatorProperties)); - spatialLocationCalculatorProperties - .def_readwrite("roiConfig", &SpatialLocationCalculatorProperties::roiConfig) - .def_readwrite("inputConfigSync", &SpatialLocationCalculatorProperties::inputConfigSync) - ; - m.attr("SpatialLocationCalculator").attr("Properties") = spatialLocationCalculatorProperties; - - } + diff --git a/src/pipeline/NodeBindings.hpp b/src/pipeline/NodeBindings.hpp index 48ab99c99..76cb1173d 100644 --- a/src/pipeline/NodeBindings.hpp +++ b/src/pipeline/NodeBindings.hpp @@ -8,5 +8,5 @@ struct NodeBindings : public dai::Node { static void bind(pybind11::module& m); - static std::vector(dai::Pipeline&)>>> getNodeCreateMap(); + static std::vector(dai::Pipeline&, py::object class_)>>> getNodeCreateMap(); }; diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 989f9011c..5bdc72feb 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -32,7 +32,7 @@ std::shared_ptr createNode(dai::Pipeline& p, py::object class_){ auto& node = kv.first; auto& create = kv.second; if(node.is(class_)){ - return create(p); + return create(p, class_); } } return nullptr; @@ -78,7 +78,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("create", [](dai::Pipeline& p, py::object class_) { auto node = createNode(p, class_); if(node == nullptr){ - throw std::invalid_argument(std::string(py::str(class_)) + " is not a 
subclass of depthai.Node"); + throw std::invalid_argument(std::string(py::str(class_)) + " is not a subclass of depthai.node"); } return node; }) diff --git a/src/py_bindings.cpp b/src/py_bindings.cpp index bb859530c..b3db35210 100644 --- a/src/py_bindings.cpp +++ b/src/py_bindings.cpp @@ -35,17 +35,17 @@ PYBIND11_MODULE(depthai,m) m.attr("__version__") = DEPTHAI_PYTHON_VERSION; // Add bindings + LogBindings::bind(m); + DataQueueBindings::bind(m); + DatatypeBindings::bind(m); + CommonBindings::bind(m); OpenVINOBindings::bind(m); AssetManagerBindings::bind(m); - NodeBindings::bind(m); PipelineBindings::bind(m); + NodeBindings::bind(m); XLinkConnectionBindings::bind(m); DeviceBindings::bind(m); DeviceBootloaderBindings::bind(m); - CommonBindings::bind(m); - DatatypeBindings::bind(m); - DataQueueBindings::bind(m); - LogBindings::bind(m); // Call dai::initialize on 'import depthai' to initialize asap dai::initialize(); From 9b77f1bfdacc2673052cd386e862cefac25554a7 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 4 Apr 2021 03:04:38 +0200 Subject: [PATCH 08/40] Added a simple scripting example --- depthai-core | 2 +- examples/29_simple_script_camera_control.py | 40 +++++++++++++++++++++ src/pipeline/AssetManagerBindings.cpp | 7 ++-- src/pipeline/NodeBindings.cpp | 14 ++++---- src/pipeline/PipelineBindings.cpp | 1 - 5 files changed, 53 insertions(+), 11 deletions(-) create mode 100644 examples/29_simple_script_camera_control.py diff --git a/depthai-core b/depthai-core index 59a913044..d2609ea2b 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 59a9130448119a785d8c948be8c89087591efd2a +Subproject commit d2609ea2bd774ba517b9d30c68ba3d69b6920b6b diff --git a/examples/29_simple_script_camera_control.py b/examples/29_simple_script_camera_control.py new file mode 100644 index 000000000..b30828c19 --- /dev/null +++ b/examples/29_simple_script_camera_control.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +import cv2 +import depthai as dai +import numpy as np +import time + +# Start defining a pipeline +pipeline = dai.Pipeline() + +# Define a source - color camera +cam = pipeline.create(dai.node.ColorCamera) + +# Script node +script = pipeline.create(dai.node.LxScript) +script.setScriptData(""" +import time +ctrl = CameraControl() +ctrl.setCaptureStill(True) +while True: + time.sleep(1) + node.io['out'].send(ctrl) +""") + +# XLinkOut +xout = pipeline.create(dai.node.XLinkOut) +xout.setStreamName('still') + +# Connections +script.outputs['out'].link(cam.inputControl) +cam.still.link(xout.input) + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + # Start pipeline + device.startPipeline() + while True: + img = device.getOutputQueue("still").get() + cv2.imshow('still', img.getCvFrame()) + if cv2.waitKey(1) == ord('q'): + exit(0) diff --git a/src/pipeline/AssetManagerBindings.cpp b/src/pipeline/AssetManagerBindings.cpp index 486676336..a8494e862 100644 --- a/src/pipeline/AssetManagerBindings.cpp +++ b/src/pipeline/AssetManagerBindings.cpp @@ -28,9 +28,10 @@ void AssetManagerBindings::bind(pybind11::module& m){ py::class_(m, "AssetManager", DOC(dai, AssetManager)) .def(py::init<>()) .def("addExisting", &AssetManager::addExisting, py::arg("assets"), DOC(dai, AssetManager, addExisting)) - .def("add", static_cast(&AssetManager::add), py::arg("asset"), DOC(dai, AssetManager, add)) - .def("add", static_cast(&AssetManager::add), py::arg("key"), py::arg("asset"), DOC(dai, AssetManager, add, 2)) - .def("load", &AssetManager::load, py::arg("key"), py::arg("path"), 
py::arg("alignment") = 64, DOC(dai, AssetManager, load)) + .def("add", static_cast (AssetManager::*)(Asset)>(&AssetManager::add), py::arg("asset"), DOC(dai, AssetManager, add)) + .def("add", static_cast (AssetManager::*)(const std::string&, Asset)>(&AssetManager::add), py::arg("key"), py::arg("asset"), DOC(dai, AssetManager, add, 2)) + .def("add", static_cast (AssetManager::*)(const std::string& key, const std::string& path, int alignment)>(&AssetManager::add), py::arg("key"), py::arg("path"), py::arg("alignment") = 64, DOC(dai, AssetManager, add, 3)) + .def("add", static_cast (AssetManager::*)(const std::string& key, const std::vector& data, int alignment)>(&AssetManager::add), py::arg("key"), py::arg("data"), py::arg("alignment") = 64, DOC(dai, AssetManager, add, 4)) .def("set", &AssetManager::set, py::arg("key"), py::arg("asset"), DOC(dai, AssetManager, set)) .def("get", static_cast (AssetManager::*)(const std::string&) const>(&AssetManager::get), py::arg("key"), DOC(dai, AssetManager, get)) .def("get", static_cast (AssetManager::*)(const std::string&)>(&AssetManager::get), py::arg("key"), DOC(dai, AssetManager, get, 2)) diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 11b903687..086743fd7 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -303,7 +303,8 @@ void NodeBindings::bind(pybind11::module& m){ .def("getName", &Node::getName, DOC(dai, Node, getName)) .def("getOutputs", &Node::getOutputs, DOC(dai, Node, getOutputs)) .def("getInputs", &Node::getInputs, DOC(dai, Node, getInputs)) - .def("getAssets", &Node::getAssets, DOC(dai, Node, getAssets)) + .def("getAssetManager", static_cast(&Node::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Node, getAssetManager)) + .def("getAssetManager", static_cast(&Node::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Node, getAssetManager)) ; // Node::Input bindings @@ -697,6 +698,7 @@ void NodeBindings::bind(pybind11::module& m){ // SystemLogger node ADD_NODE(SystemLogger) .def_readonly("out", &SystemLogger::out, DOC(dai, node, SystemLogger, out)) + .def("setRate", &SystemLogger::setRate, DOC(dai, node, SystemLogger, setRate)) ; // LxScript node @@ -704,11 +706,11 @@ void NodeBindings::bind(pybind11::module& m){ .def_readonly("inputs", &LxScript::inputs) .def_readonly("outputs", &LxScript::outputs) .def("setName", &LxScript::setName) - .def("setScriptPath", &LxScript::setScriptPath) - .def("getScriptPath", &LxScript::getScriptPath) - .def("addAsset", &LxScript::addAsset) - .def("setProcessor", &LxScript::setProcessor) - .def("getProcessor", &LxScript::getProcessor) + .def("setScriptPath", &LxScript::setScriptPath, DOC(dai, node, LxScript, setScriptPath)) + .def("setScriptData", static_cast(&LxScript::setScriptData), py::arg("script"), DOC(dai, node, LxScript, setScriptData)) + .def("setScriptData", static_cast&)>(&LxScript::setScriptData), py::arg("data"), DOC(dai, node, LxScript, setScriptData, 2)) + .def("setProcessor", &LxScript::setProcessor, DOC(dai, node, LxScript, setProcessor)) + .def("getProcessor", &LxScript::getProcessor, DOC(dai, node, LxScript, getProcessor)) ; } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 5bdc72feb..cb10b652d 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -69,7 +69,6 @@ void PipelineBindings::bind(pybind11::module& m){ .def("getNodeMap", &Pipeline::getNodeMap, DOC(dai, Pipeline, getNodeMap), 
py::return_value_policy::reference_internal, DOC(dai, Pipeline, getNodeMap)) .def("link", &Pipeline::link, DOC(dai, Pipeline, link), DOC(dai, Pipeline, link)) .def("unlink", &Pipeline::unlink, DOC(dai, Pipeline, unlink), DOC(dai, Pipeline, unlink)) - .def("getAllAssets", &Pipeline::getAllAssets, DOC(dai, Pipeline, getAllAssets)) .def("getAssetManager", static_cast(&Pipeline::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Pipeline, getAssetManager)) .def("getAssetManager", static_cast(&Pipeline::getAssetManager), py::return_value_policy::reference_internal, DOC(dai, Pipeline, getAssetManager)) .def("setOpenVINOVersion", &Pipeline::setOpenVINOVersion, py::arg("version") = Pipeline::DEFAULT_OPENVINO_VERSION, DOC(dai, Pipeline, setOpenVINOVersion), DOC(dai, Pipeline, setOpenVINOVersion)) From 0f0c284d199f113149fad47093558a56230d9359 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 4 Apr 2021 03:47:09 +0200 Subject: [PATCH 09/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index d2609ea2b..34ea76578 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit d2609ea2bd774ba517b9d30c68ba3d69b6920b6b +Subproject commit 34ea765787d2a0dbefe7ef06dbd8bc132373a9bb From dee494774b386ac66404bd37b4ecd47a2a25f48d Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sun, 4 Apr 2021 17:57:47 +0200 Subject: [PATCH 10/40] Renamed node 'LxScript' to 'Script' and deprecated pipeline.create[Node]() --- depthai-core | 2 +- docs/source/tutorials/hello_world.rst | 6 +- examples/01_rgb_preview.py | 4 +- examples/02_mono_preview.py | 8 +- examples/03_depth_preview.py | 8 +- examples/04_rgb_encoding.py | 6 +- examples/05_rgb_mono_encoding.py | 18 ++--- examples/06_rgb_full_resolution_saver.py | 8 +- examples/07_mono_full_resolution_saver.py | 4 +- examples/08_rgb_mobilenet.py | 8 +- examples/09_mono_mobilenet.py | 10 +-- examples/10_mono_depth_mobilenetssd.py | 16 ++-- examples/11_rgb_encoding_mono_mobilenet.py | 18 ++--- .../12_rgb_encoding_mono_mobilenet_depth.py | 24 +++--- examples/13_encoding_max_limit.py | 20 ++--- examples/14_1_color_camera_control.py | 16 ++-- examples/14_2_mono_camera_control.py | 16 ++-- examples/14_3_depth_crop_control.py | 12 +-- examples/15_rgb_mobilenet_4k.py | 10 +-- examples/16_device_queue_event.py | 8 +- examples/17_video_mobilenet.py | 6 +- examples/18_rgb_encoding_mobilenet.py | 12 +-- examples/19_mono_camera_control.py | 10 +-- examples/20_color_rotate_warp.py | 10 +-- .../22_1_tiny_yolo_v3_device_side_decoding.py | 8 +- .../22_2_tiny_yolo_v4_device_side_decoding.py | 8 +- examples/23_autoexposure_roi.py | 10 +-- examples/24_opencv_support.py | 6 +- examples/25_system_information.py | 4 +- examples/26_1_spatial_mobilenet.py | 20 ++--- examples/26_2_spatial_mobilenet_mono.py | 18 ++--- examples/26_3_spatial_tiny_yolo.py | 20 ++--- examples/27_spatial_location_calculator.py | 16 ++-- examples/28_camera_video_example.py | 4 +- examples/29_simple_script_camera_control.py | 2 +- src/pipeline/NodeBindings.cpp | 23 +++--- src/pipeline/PipelineBindings.cpp | 81 ++++++++++++++----- 37 files changed, 262 insertions(+), 218 deletions(-) diff --git a/depthai-core b/depthai-core index 34ea76578..9c5b7fe8f 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 34ea765787d2a0dbefe7ef06dbd8bc132373a9bb +Subproject commit 9c5b7fe8f57e7c98d33dacbdbf9f1b460c226b8c diff --git a/docs/source/tutorials/hello_world.rst b/docs/source/tutorials/hello_world.rst index 
9d0d0cbbe..55086b316 100644 --- a/docs/source/tutorials/hello_world.rst +++ b/docs/source/tutorials/hello_world.rst @@ -107,7 +107,7 @@ Now, first node we will add is a :class:`ColorCamera`. We will use the :code:`pr .. code-block:: python - cam_rgb = pipeline.createColorCamera() + cam_rgb = pipeline.create(dai.node.ColorCamera) cam_rgb.setPreviewSize(300, 300) cam_rgb.setInterleaved(False) @@ -131,11 +131,11 @@ and in our case, since we want to receive data from device to host, we will use .. code-block:: python - xout_rgb = pipeline.createXLinkOut() + xout_rgb = pipeline.create(dai.node.XLinkOut) xout_rgb.setStreamName("rgb") cam_rgb.preview.link(xout_rgb.input) - xout_nn = pipeline.createXLinkOut() + xout_nn = pipeline.create(dai.node.XLinkOut) xout_nn.setStreamName("nn") detection_nn.out.link(xout_nn.input) diff --git a/examples/01_rgb_preview.py b/examples/01_rgb_preview.py index ea7523ff2..637964bbf 100755 --- a/examples/01_rgb_preview.py +++ b/examples/01_rgb_preview.py @@ -7,7 +7,7 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) @@ -15,7 +15,7 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB) # Create output -xoutRgb = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") camRgb.preview.link(xoutRgb.input) diff --git a/examples/02_mono_preview.py b/examples/02_mono_preview.py index e5ae07e07..e51ec031b 100755 --- a/examples/02_mono_preview.py +++ b/examples/02_mono_preview.py @@ -7,20 +7,20 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) cameras -camLeft = pipeline.createMonoCamera() +camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Create outputs -xoutLeft = pipeline.createXLinkOut() +xoutLeft = pipeline.create(dai.node.XLinkOut) xoutLeft.setStreamName('left') camLeft.out.link(xoutLeft.input) -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName('right') camRight.out.link(xoutRight.input) diff --git a/examples/03_depth_preview.py b/examples/03_depth_preview.py index ba0a55938..605550a20 100755 --- a/examples/03_depth_preview.py +++ b/examples/03_depth_preview.py @@ -8,16 +8,16 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) cameras -left = pipeline.createMonoCamera() +left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) left.setBoardSocket(dai.CameraBoardSocket.LEFT) -right = pipeline.createMonoCamera() +right = pipeline.create(dai.node.MonoCamera) right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) right.setBoardSocket(dai.CameraBoardSocket.RIGHT) # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) -depth = pipeline.createStereoDepth() +depth = pipeline.create(dai.node.StereoDepth) depth.setConfidenceThreshold(200) # Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default) 
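The disparity stream defined in this example is easiest to inspect when stretched to the full 8-bit range on the host. A minimal sketch of that host-side loop, assuming the default 0-95 disparity range (no extended or subpixel disparity) and reusing the pipeline and the "disparity" XLinkOut stream from this example:

import cv2
import numpy as np
import depthai as dai

with dai.Device(pipeline) as device:
    q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
    while True:
        frame = q.get().getFrame()                     # raw uint8 disparity values
        frame = (frame * (255 / 95)).astype(np.uint8)  # stretch 0-95 to 0-255 for display
        cv2.imshow("disparity", cv2.applyColorMap(frame, cv2.COLORMAP_JET))
        if cv2.waitKey(1) == ord('q'):
            break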
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7 # For depth filtering @@ -43,7 +43,7 @@ right.out.link(depth.right) # Create output -xout = pipeline.createXLinkOut() +xout = pipeline.create(dai.node.XLinkOut) xout.setStreamName("disparity") depth.disparity.link(xout.input) diff --git a/examples/04_rgb_encoding.py b/examples/04_rgb_encoding.py index 28710e7c3..32150d916 100755 --- a/examples/04_rgb_encoding.py +++ b/examples/04_rgb_encoding.py @@ -6,17 +6,17 @@ pipeline = dai.Pipeline() # Define a source - color camera -cam = pipeline.createColorCamera() +cam = pipeline.create(dai.node.ColorCamera) cam.setBoardSocket(dai.CameraBoardSocket.RGB) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) # Create an encoder, consuming the frames and encoding them using H.265 encoding -videoEncoder = pipeline.createVideoEncoder() +videoEncoder = pipeline.create(dai.node.VideoEncoder) videoEncoder.setDefaultProfilePreset(3840, 2160, 30, dai.VideoEncoderProperties.Profile.H265_MAIN) cam.video.link(videoEncoder.input) # Create output -videoOut = pipeline.createXLinkOut() +videoOut = pipeline.create(dai.node.XLinkOut) videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) diff --git a/examples/05_rgb_mono_encoding.py b/examples/05_rgb_mono_encoding.py index 11bc5c478..029ca02d3 100755 --- a/examples/05_rgb_mono_encoding.py +++ b/examples/05_rgb_mono_encoding.py @@ -6,35 +6,35 @@ pipeline = dai.Pipeline() # Define a source - color and mono cameras -colorCam = pipeline.createColorCamera() -monoCam = pipeline.createMonoCamera() +colorCam = pipeline.create(dai.node.ColorCamera) +monoCam = pipeline.create(dai.node.MonoCamera) monoCam.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoCam2 = pipeline.createMonoCamera() +monoCam2 = pipeline.create(dai.node.MonoCamera) monoCam2.setBoardSocket(dai.CameraBoardSocket.RIGHT) # Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding -ve1 = pipeline.createVideoEncoder() +ve1 = pipeline.create(dai.node.VideoEncoder) ve1.setDefaultProfilePreset(1280, 720, 30, dai.VideoEncoderProperties.Profile.H264_MAIN) monoCam.out.link(ve1.input) -ve2 = pipeline.createVideoEncoder() +ve2 = pipeline.create(dai.node.VideoEncoder) ve2.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN) colorCam.video.link(ve2.input) -ve3 = pipeline.createVideoEncoder() +ve3 = pipeline.create(dai.node.VideoEncoder) ve3.setDefaultProfilePreset(1280, 720, 30, dai.VideoEncoderProperties.Profile.H264_MAIN) monoCam2.out.link(ve3.input) # Create outputs -ve1Out = pipeline.createXLinkOut() +ve1Out = pipeline.create(dai.node.XLinkOut) ve1Out.setStreamName('ve1Out') ve1.bitstream.link(ve1Out.input) -ve2Out = pipeline.createXLinkOut() +ve2Out = pipeline.create(dai.node.XLinkOut) ve2Out.setStreamName('ve2Out') ve2.bitstream.link(ve2Out.input) -ve3Out = pipeline.createXLinkOut() +ve3Out = pipeline.create(dai.node.XLinkOut) ve3Out.setStreamName('ve3Out') ve3.bitstream.link(ve3Out.input) diff --git a/examples/06_rgb_full_resolution_saver.py b/examples/06_rgb_full_resolution_saver.py index 9e526cef1..575c440b2 100755 --- a/examples/06_rgb_full_resolution_saver.py +++ b/examples/06_rgb_full_resolution_saver.py @@ -10,21 +10,21 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) # Create RGB output -xoutRgb = 
pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") camRgb.video.link(xoutRgb.input) # Create encoder to produce JPEG images -videoEnc = pipeline.createVideoEncoder() +videoEnc = pipeline.create(dai.node.VideoEncoder) videoEnc.setDefaultProfilePreset(camRgb.getVideoSize(), camRgb.getFps(), dai.VideoEncoderProperties.Profile.MJPEG) camRgb.video.link(videoEnc.input) # Create JPEG output -xoutJpeg = pipeline.createXLinkOut() +xoutJpeg = pipeline.create(dai.node.XLinkOut) xoutJpeg.setStreamName("jpeg") videoEnc.bitstream.link(xoutJpeg.input) diff --git a/examples/07_mono_full_resolution_saver.py b/examples/07_mono_full_resolution_saver.py index 83749f01c..befa69429 100755 --- a/examples/07_mono_full_resolution_saver.py +++ b/examples/07_mono_full_resolution_saver.py @@ -10,12 +10,12 @@ pipeline = dai.Pipeline() # Define a source - mono (grayscale) camera -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Create output -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName("right") camRight.out.link(xoutRight.input) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index d17104260..152766f8a 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -17,13 +17,13 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) camRgb.setInterleaved(False) camRgb.setFps(40) # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(args.nnPath) nn.setNumInferenceThreads(2) @@ -31,14 +31,14 @@ camRgb.preview.link(nn.input) # Create outputs -xoutRgb = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") if args.sync: nn.passthrough.link(xoutRgb.input) else: camRgb.preview.link(xoutRgb.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index a98bec289..d04399ba4 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -16,19 +16,19 @@ pipeline = dai.Pipeline() # Define a source - mono (grayscale) camera -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) # Create a node to convert the grayscale frame into the nn-acceptable form -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setResize(300, 300) # The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) @@ -36,11 +36,11 @@ manip.out.link(nn.input) # Create outputs -manipOut = pipeline.createXLinkOut() +manipOut = pipeline.create(dai.node.XLinkOut) manipOut.setStreamName("right") manip.out.link(manipOut.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index eac21f9fb..fe5b8d6f1 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -16,16 +16,16 @@ pipeline = dai.Pipeline() # Define a source - mono (grayscale) cameras -left = pipeline.createMonoCamera() +left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) left.setBoardSocket(dai.CameraBoardSocket.LEFT) -right = pipeline.createMonoCamera() +right = pipeline.create(dai.node.MonoCamera) right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) right.setBoardSocket(dai.CameraBoardSocket.RIGHT) # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) -stereo = pipeline.createStereoDepth() +stereo = pipeline.create(dai.node.StereoDepth) stereo.setOutputRectified(True) # The rectified streams are horizontally mirrored by default stereo.setConfidenceThreshold(255) stereo.setRectifyEdgeFillColor(0) # Black, to better see the cutout from rectification (black stripe on the edges) @@ -34,14 +34,14 @@ right.out.link(stereo.right) # Create a node to convert the grayscale frame into the nn-acceptable form -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setResize(300, 300) # The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) stereo.rectifiedRight.link(manip.inputImage) # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) @@ -49,16 +49,16 @@ manip.out.link(nn.input) # Create outputs -depthOut = pipeline.createXLinkOut() +depthOut = pipeline.create(dai.node.XLinkOut) depthOut.setStreamName("depth") stereo.disparity.link(depthOut.input) -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName("rectifiedRight") manip.out.link(xoutRight.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 98d75b31d..8191cc5e2 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -14,44 +14,44 @@ pipeline = dai.Pipeline() -cam = pipeline.createColorCamera() +cam = pipeline.create(dai.node.ColorCamera) cam.setBoardSocket(dai.CameraBoardSocket.RGB) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -videoEncoder = pipeline.createVideoEncoder() +videoEncoder = pipeline.create(dai.node.VideoEncoder) videoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN) cam.video.link(videoEncoder.input) -videoOut = pipeline.createXLinkOut() +videoOut = pipeline.create(dai.node.XLinkOut) videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setResize(300, 300) # The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) camRight.out.link(manip.inputImage) manip.out.link(nn.input) -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName("right") camRight.out.link(xoutRight.input) -manipOut = pipeline.createXLinkOut() +manipOut = pipeline.create(dai.node.XLinkOut) manipOut.setStreamName("manip") manip.out.link(manipOut.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index b1cff44f1..0bb838c0a 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -14,26 +14,26 @@ pipeline = dai.Pipeline() -cam = pipeline.createColorCamera() +cam = pipeline.create(dai.node.ColorCamera) cam.setBoardSocket(dai.CameraBoardSocket.RGB) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -videoEncoder = pipeline.createVideoEncoder() +videoEncoder = pipeline.create(dai.node.VideoEncoder) videoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN) cam.video.link(videoEncoder.input) -videoOut = pipeline.createXLinkOut() +videoOut = pipeline.create(dai.node.XLinkOut) videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) -camLeft = pipeline.createMonoCamera() +camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -depth = pipeline.createStereoDepth() +depth = pipeline.create(dai.node.StereoDepth) depth.setConfidenceThreshold(200) # Note: the rectified streams are horizontally mirrored by default depth.setOutputRectified(True) @@ -41,32 +41,32 @@ camLeft.out.link(depth.left) camRight.out.link(depth.right) -depthOut = pipeline.createXLinkOut() +depthOut = pipeline.create(dai.node.XLinkOut) depthOut.setStreamName("depth") depth.disparity.link(depthOut.input) -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setResize(300, 300) # The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) depth.rectifiedRight.link(manip.inputImage) manip.out.link(nn.input) -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName("right") camRight.out.link(xoutRight.input) -manipOut = pipeline.createXLinkOut() +manipOut = pipeline.create(dai.node.XLinkOut) manipOut.setStreamName("manip") manip.out.link(manipOut.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/13_encoding_max_limit.py b/examples/13_encoding_max_limit.py index ce076c70a..6bbcdd433 100755 --- a/examples/13_encoding_max_limit.py +++ b/examples/13_encoding_max_limit.py @@ -5,17 +5,17 @@ pipeline = dai.Pipeline() # Nodes -colorCam = pipeline.createColorCamera() +colorCam = pipeline.create(dai.node.ColorCamera) colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) -monoCam = pipeline.createMonoCamera() -monoCam2 = pipeline.createMonoCamera() -ve1 = pipeline.createVideoEncoder() -ve2 = pipeline.createVideoEncoder() -ve3 = pipeline.createVideoEncoder() - -ve1Out = pipeline.createXLinkOut() -ve2Out = pipeline.createXLinkOut() -ve3Out = pipeline.createXLinkOut() +monoCam = pipeline.create(dai.node.MonoCamera) +monoCam2 = pipeline.create(dai.node.MonoCamera) +ve1 = pipeline.create(dai.node.VideoEncoder) +ve2 = pipeline.create(dai.node.VideoEncoder) +ve3 = pipeline.create(dai.node.VideoEncoder) + +ve1Out = pipeline.create(dai.node.XLinkOut) +ve2Out = pipeline.create(dai.node.XLinkOut) +ve3Out = pipeline.create(dai.node.XLinkOut) # Properties monoCam.setBoardSocket(dai.CameraBoardSocket.LEFT) diff --git a/examples/14_1_color_camera_control.py b/examples/14_1_color_camera_control.py index e9fe2532c..a3cb01f1f 100755 --- a/examples/14_1_color_camera_control.py +++ b/examples/14_1_color_camera_control.py @@ -26,14 +26,14 @@ pipeline = dai.Pipeline() # Nodes -colorCam = pipeline.createColorCamera() -controlIn = pipeline.createXLinkIn() -configIn = pipeline.createXLinkIn() -videoEncoder = pipeline.createVideoEncoder() -stillEncoder = pipeline.createVideoEncoder() -videoMjpegOut = pipeline.createXLinkOut() -stillMjpegOut = pipeline.createXLinkOut() -previewOut = pipeline.createXLinkOut() +colorCam = pipeline.create(dai.node.ColorCamera) +controlIn = pipeline.create(dai.node.XLinkIn) +configIn = pipeline.create(dai.node.XLinkIn) +videoEncoder = pipeline.create(dai.node.VideoEncoder) +stillEncoder = pipeline.create(dai.node.VideoEncoder) +videoMjpegOut = pipeline.create(dai.node.XLinkOut) +stillMjpegOut = pipeline.create(dai.node.XLinkOut) +previewOut = pipeline.create(dai.node.XLinkOut) # Properties diff --git a/examples/14_2_mono_camera_control.py b/examples/14_2_mono_camera_control.py index 659d94d55..df087e49a 100755 --- a/examples/14_2_mono_camera_control.py +++ b/examples/14_2_mono_camera_control.py @@ -24,10 +24,10 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) camera -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -camLeft = pipeline.createMonoCamera() +camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) @@ 
-35,20 +35,20 @@ topLeft = dai.Point2f(0.4, 0.4) bottomRight = dai.Point2f(0.6, 0.6) -manipRight = pipeline.createImageManip() +manipRight = pipeline.create(dai.node.ImageManip) manipRight.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) -manipLeft = pipeline.createImageManip() +manipLeft = pipeline.create(dai.node.ImageManip) manipLeft.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) manipRight.setMaxOutputFrameSize(camRight.getResolutionHeight()*camRight.getResolutionWidth()*3) # Camera movement config (wasd) -configIn = pipeline.createXLinkIn() +configIn = pipeline.create(dai.node.XLinkIn) configIn.setStreamName('config') configIn.out.link(manipRight.inputConfig) configIn.out.link(manipLeft.inputConfig) # Camera control (exp, iso, focus) -controlIn = pipeline.createXLinkIn() +controlIn = pipeline.create(dai.node.XLinkIn) controlIn.setStreamName('control') controlIn.out.link(camRight.inputControl) controlIn.out.link(camLeft.inputControl) @@ -58,11 +58,11 @@ camLeft.out.link(manipLeft.inputImage) # Create outputs -manipOutRight = pipeline.createXLinkOut() +manipOutRight = pipeline.create(dai.node.XLinkOut) manipOutRight.setStreamName("right") manipRight.out.link(manipOutRight.input) -manipOutLeft = pipeline.createXLinkOut() +manipOutLeft = pipeline.create(dai.node.XLinkOut) manipOutLeft.setStreamName("left") manipLeft.out.link(manipOutLeft.input) diff --git a/examples/14_3_depth_crop_control.py b/examples/14_3_depth_crop_control.py index 87c25761f..46ebb00eb 100644 --- a/examples/14_3_depth_crop_control.py +++ b/examples/14_3_depth_crop_control.py @@ -15,11 +15,11 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) cameras -left = pipeline.createMonoCamera() +left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) left.setBoardSocket(dai.CameraBoardSocket.LEFT) -right = pipeline.createMonoCamera() +right = pipeline.create(dai.node.MonoCamera) right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) right.setBoardSocket(dai.CameraBoardSocket.RIGHT) @@ -27,13 +27,13 @@ topLeft = dai.Point2f(0.4, 0.4) bottomRight = dai.Point2f(0.6, 0.6) -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) manip.setMaxOutputFrameSize(right.getResolutionHeight()*right.getResolutionWidth()*3) # Create a node that will produce the depth map -stereo = pipeline.createStereoDepth() +stereo = pipeline.create(dai.node.StereoDepth) stereo.setConfidenceThreshold(200) stereo.setOutputDepth(True) @@ -42,12 +42,12 @@ # Control movement -controlIn = pipeline.createXLinkIn() +controlIn = pipeline.create(dai.node.XLinkIn) controlIn.setStreamName('control') controlIn.out.link(manip.inputConfig) # Create outputs -xout = pipeline.createXLinkOut() +xout = pipeline.create(dai.node.XLinkOut) xout.setStreamName("depth") stereo.depth.link(manip.inputImage) manip.out.link(xout.input) diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 4459339db..1bd0c266d 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -15,14 +15,14 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) # NN input camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) 
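All of the example migrations above and below follow the same shape: nodes are now created through the generic pipeline.create() factory with a class from the dai.node submodule, instead of a dedicated create[Node]() method. A condensed, self-contained sketch of that pattern (the stream name, preview size, and queue settings are arbitrary illustrative choices):

import cv2
import depthai as dai

pipeline = dai.Pipeline()

camRgb = pipeline.create(dai.node.ColorCamera)   # was: pipeline.createColorCamera()
camRgb.setPreviewSize(300, 300)
camRgb.setInterleaved(False)

xoutRgb = pipeline.create(dai.node.XLinkOut)     # was: pipeline.createXLinkOut()
xoutRgb.setStreamName("rgb")
camRgb.preview.link(xoutRgb.input)

with dai.Device(pipeline) as device:
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    while True:
        cv2.imshow("rgb", qRgb.get().getCvFrame())
        if cv2.waitKey(1) == ord('q'):
            break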
camRgb.setInterleaved(False) camRgb.setPreviewKeepAspectRatio(False) # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) @@ -30,15 +30,15 @@ camRgb.preview.link(nn.input) # Create outputs -xoutVideo = pipeline.createXLinkOut() +xoutVideo = pipeline.create(dai.node.XLinkOut) xoutVideo.setStreamName("video") camRgb.video.link(xoutVideo.input) -xoutPreview = pipeline.createXLinkOut() +xoutPreview = pipeline.create(dai.node.XLinkOut) xoutPreview.setStreamName("preview") camRgb.preview.link(xoutPreview.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/16_device_queue_event.py b/examples/16_device_queue_event.py index 553f05116..270f85556 100755 --- a/examples/16_device_queue_event.py +++ b/examples/16_device_queue_event.py @@ -10,11 +10,11 @@ pipeline = dai.Pipeline() # Create Color and Mono cameras -camRgb = pipeline.createColorCamera() -camMono = pipeline.createMonoCamera() +camRgb = pipeline.create(dai.node.ColorCamera) +camMono = pipeline.create(dai.node.MonoCamera) # Create separate streams for them -xoutRgb = pipeline.createXLinkOut() -xoutMono = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) +xoutMono = pipeline.create(dai.node.XLinkOut) # Set properties xoutRgb.setStreamName("rgb") diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index dca49608c..fd4841e58 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -19,11 +19,11 @@ # Create neural network input -xinDet = pipeline.createXLinkIn() +xinDet = pipeline.create(dai.node.XLinkIn) xinDet.setStreamName("inDet") # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) @@ -31,7 +31,7 @@ xinDet.out.link(nn.input) # Create output -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index edf3be161..88662fe26 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -14,32 +14,32 @@ pipeline = dai.Pipeline() -cam = pipeline.createColorCamera() +cam = pipeline.create(dai.node.ColorCamera) cam.setBoardSocket(dai.CameraBoardSocket.RGB) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam.setPreviewSize(300, 300) cam.setInterleaved(False) -videoEncoder = pipeline.createVideoEncoder() +videoEncoder = pipeline.create(dai.node.VideoEncoder) videoEncoder.setDefaultProfilePreset(1920, 1080, 30, dai.VideoEncoderProperties.Profile.H265_MAIN) cam.video.link(videoEncoder.input) -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) nn.input.setBlocking(False) cam.preview.link(nn.input) -videoOut = pipeline.createXLinkOut() +videoOut = pipeline.create(dai.node.XLinkOut) videoOut.setStreamName('h265') videoEncoder.bitstream.link(videoOut.input) -xoutRgb = pipeline.createXLinkOut() 
+xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") cam.preview.link(xoutRgb.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/19_mono_camera_control.py b/examples/19_mono_camera_control.py index bb4eb076b..84725191a 100755 --- a/examples/19_mono_camera_control.py +++ b/examples/19_mono_camera_control.py @@ -15,24 +15,24 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) cameras -camLeft = pipeline.createMonoCamera() +camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -camRight = pipeline.createMonoCamera() +camRight = pipeline.create(dai.node.MonoCamera) camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) camRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Create outputs -xoutLeft = pipeline.createXLinkOut() +xoutLeft = pipeline.create(dai.node.XLinkOut) xoutLeft.setStreamName('left') camLeft.out.link(xoutLeft.input) -xoutRight = pipeline.createXLinkOut() +xoutRight = pipeline.create(dai.node.XLinkOut) xoutRight.setStreamName('right') camRight.out.link(xoutRight.input) # Create and link control input -control_in = pipeline.createXLinkIn() +control_in = pipeline.create(dai.node.XLinkIn) control_in.setStreamName('control') control_in.out.link(camLeft.inputControl) control_in.out.link(camRight.inputControl) diff --git a/examples/20_color_rotate_warp.py b/examples/20_color_rotate_warp.py index dd8b7e085..653df27a4 100755 --- a/examples/20_color_rotate_warp.py +++ b/examples/20_color_rotate_warp.py @@ -59,17 +59,17 @@ def printControls(): pipeline = dai.Pipeline() -cam = pipeline.createColorCamera() +cam = pipeline.create(dai.node.ColorCamera) cam.setPreviewSize(640, 480) cam.setInterleaved(False) -camOut = pipeline.createXLinkOut() +camOut = pipeline.create(dai.node.XLinkOut) camOut.setStreamName("preview") -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.setMaxOutputFrameSize(2000*1500*3) -manipOut = pipeline.createXLinkOut() +manipOut = pipeline.create(dai.node.XLinkOut) manipOut.setStreamName("manip") -manipCfg = pipeline.createXLinkIn() +manipCfg = pipeline.create(dai.node.XLinkIn) manipCfg.setStreamName("manipCfg") cam.preview.link(camOut.input) diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 36b005225..37a0405fc 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -42,13 +42,13 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(416, 416) camRgb.setInterleaved(False) camRgb.setFps(40) # network specific settings -detectionNetwork = pipeline.createYoloDetectionNetwork() +detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork) detectionNetwork.setConfidenceThreshold(0.5) detectionNetwork.setNumClasses(80) detectionNetwork.setCoordinateSize(4) @@ -63,14 +63,14 @@ camRgb.preview.link(detectionNetwork.input) # Create outputs -xoutRgb = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") if syncNN: detectionNetwork.passthrough.link(xoutRgb.input) else: camRgb.preview.link(xoutRgb.input) -nnOut = pipeline.createXLinkOut() +nnOut = 
pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("detections") detectionNetwork.out.link(nnOut.input) diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 4f638ee55..dc5ca4778 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -41,13 +41,13 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(416, 416) camRgb.setInterleaved(False) camRgb.setFps(40) # network specific settings -detectionNetwork = pipeline.createYoloDetectionNetwork() +detectionNetwork = pipeline.create(dai.node.YoloDetectionNetwork) detectionNetwork.setConfidenceThreshold(0.5) detectionNetwork.setNumClasses(80) detectionNetwork.setCoordinateSize(4) @@ -62,14 +62,14 @@ camRgb.preview.link(detectionNetwork.input) # Create outputs -xoutRgb = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") if syncNN: detectionNetwork.passthrough.link(xoutRgb.input) else: camRgb.preview.link(xoutRgb.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("detections") detectionNetwork.out.link(nnOut.input) diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 8ce0fa11a..b7dc02891 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -20,16 +20,16 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(*previewSize) camRgb.setInterleaved(False) -camControlIn = pipeline.createXLinkIn() +camControlIn = pipeline.create(dai.node.XLinkIn) camControlIn.setStreamName('camControl') camControlIn.out.link(camRgb.inputControl) # Define a neural network that will make predictions based on the source frames -nn = pipeline.createMobileNetDetectionNetwork() +nn = pipeline.create(dai.node.MobileNetDetectionNetwork) nn.setConfidenceThreshold(0.5) nn.setBlobPath(nnPath) nn.setNumInferenceThreads(2) @@ -37,11 +37,11 @@ camRgb.preview.link(nn.input) # Create outputs -xoutRgb = pipeline.createXLinkOut() +xoutRgb = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") camRgb.preview.link(xoutRgb.input) -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("nn") nn.out.link(nnOut.input) diff --git a/examples/24_opencv_support.py b/examples/24_opencv_support.py index 891bb09eb..0cc4cd43d 100755 --- a/examples/24_opencv_support.py +++ b/examples/24_opencv_support.py @@ -7,7 +7,7 @@ pipeline = dai.Pipeline() # Define a source - color camera -camRgb = pipeline.createColorCamera() +camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) @@ -15,9 +15,9 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) # Create output -xoutVideo = pipeline.createXLinkOut() +xoutVideo = pipeline.create(dai.node.XLinkOut) xoutVideo.setStreamName("video") -xoutPreview = pipeline.createXLinkOut() +xoutPreview = pipeline.create(dai.node.XLinkOut) xoutPreview.setStreamName("preview") camRgb.preview.link(xoutPreview.input) diff --git a/examples/25_system_information.py b/examples/25_system_information.py index cc118b3cb..63962c324 100755 --- 
a/examples/25_system_information.py +++ b/examples/25_system_information.py @@ -19,11 +19,11 @@ def print_sys_info(info): # Start defining a pipeline pipeline = dai.Pipeline() -sys_logger = pipeline.createSystemLogger() +sys_logger = pipeline.create(dai.node.SystemLogger) sys_logger.setRate(1) # 1 Hz # Create output -linkOut = pipeline.createXLinkOut() +linkOut = pipeline.create(dai.node.XLinkOut) linkOut.setStreamName("sysinfo") sys_logger.out.link(linkOut.input) diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index e0f522b11..d0c5168eb 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -27,16 +27,16 @@ pipeline = dai.Pipeline() # Define a source - color camera -colorCam = pipeline.createColorCamera() -spatialDetectionNetwork = pipeline.createMobileNetSpatialDetectionNetwork() -monoLeft = pipeline.createMonoCamera() -monoRight = pipeline.createMonoCamera() -stereo = pipeline.createStereoDepth() - -xoutRgb = pipeline.createXLinkOut() -xoutNN = pipeline.createXLinkOut() -xoutBoundingBoxDepthMapping = pipeline.createXLinkOut() -xoutDepth = pipeline.createXLinkOut() +colorCam = pipeline.create(dai.node.ColorCamera) +spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork) +monoLeft = pipeline.create(dai.node.MonoCamera) +monoRight = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) + +xoutRgb = pipeline.create(dai.node.XLinkOut) +xoutNN = pipeline.create(dai.node.XLinkOut) +xoutBoundingBoxDepthMapping = pipeline.create(dai.node.XLinkOut) +xoutDepth = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") xoutNN.setStreamName("detections") diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 7e17bc2a0..41053702a 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -30,14 +30,14 @@ pipeline = dai.Pipeline() -manip = pipeline.createImageManip() +manip = pipeline.create(dai.node.ImageManip) manip.initialConfig.setResize(300, 300) # The NN model expects BGR input. 
By default ImageManip output type would be same as input (gray in this case) manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # manip.setKeepAspectRatio(False) # Define a neural network that will make predictions based on the source frames -spatialDetectionNetwork = pipeline.createMobileNetSpatialDetectionNetwork() +spatialDetectionNetwork = pipeline.create(dai.node.MobileNetSpatialDetectionNetwork) spatialDetectionNetwork.setConfidenceThreshold(0.5) spatialDetectionNetwork.setBlobPath(nnPath) spatialDetectionNetwork.input.setBlocking(False) @@ -48,27 +48,27 @@ manip.out.link(spatialDetectionNetwork.input) # Create outputs -xoutManip = pipeline.createXLinkOut() +xoutManip = pipeline.create(dai.node.XLinkOut) xoutManip.setStreamName("right") if(syncNN): spatialDetectionNetwork.passthrough.link(xoutManip.input) else: manip.out.link(xoutManip.input) -depthRoiMap = pipeline.createXLinkOut() +depthRoiMap = pipeline.create(dai.node.XLinkOut) depthRoiMap.setStreamName("boundingBoxDepthMapping") -xoutDepth = pipeline.createXLinkOut() +xoutDepth = pipeline.create(dai.node.XLinkOut) xoutDepth.setStreamName("depth") -nnOut = pipeline.createXLinkOut() +nnOut = pipeline.create(dai.node.XLinkOut) nnOut.setStreamName("detections") spatialDetectionNetwork.out.link(nnOut.input) spatialDetectionNetwork.boundingBoxMapping.link(depthRoiMap.input) -monoLeft = pipeline.createMonoCamera() -monoRight = pipeline.createMonoCamera() -stereo = pipeline.createStereoDepth() +monoLeft = pipeline.create(dai.node.MonoCamera) +monoRight = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index e7316d85c..0681bfd4f 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -40,16 +40,16 @@ pipeline = dai.Pipeline() # Define a source - color camera -colorCam = pipeline.createColorCamera() -spatialDetectionNetwork = pipeline.createYoloSpatialDetectionNetwork() -monoLeft = pipeline.createMonoCamera() -monoRight = pipeline.createMonoCamera() -stereo = pipeline.createStereoDepth() - -xoutRgb = pipeline.createXLinkOut() -xoutNN = pipeline.createXLinkOut() -xoutBoundingBoxDepthMapping = pipeline.createXLinkOut() -xoutDepth = pipeline.createXLinkOut() +colorCam = pipeline.create(dai.node.ColorCamera) +spatialDetectionNetwork = pipeline.create(dai.node.YoloSpatialDetectionNetwork) +monoLeft = pipeline.create(dai.node.MonoCamera) +monoRight = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) + +xoutRgb = pipeline.create(dai.node.XLinkOut) +xoutNN = pipeline.create(dai.node.XLinkOut) +xoutBoundingBoxDepthMapping = pipeline.create(dai.node.XLinkOut) +xoutDepth = pipeline.create(dai.node.XLinkOut) xoutRgb.setStreamName("rgb") xoutNN.setStreamName("detections") diff --git a/examples/27_spatial_location_calculator.py b/examples/27_spatial_location_calculator.py index 4b46fb12e..5d6a0e9ff 100755 --- a/examples/27_spatial_location_calculator.py +++ b/examples/27_spatial_location_calculator.py @@ -9,14 +9,14 @@ pipeline = dai.Pipeline() # Define a source - two mono (grayscale) cameras -monoLeft = pipeline.createMonoCamera() -monoRight = pipeline.createMonoCamera() -stereo = pipeline.createStereoDepth() -spatialLocationCalculator = 
pipeline.createSpatialLocationCalculator() - -xoutDepth = pipeline.createXLinkOut() -xoutSpatialData = pipeline.createXLinkOut() -xinSpatialCalcConfig = pipeline.createXLinkIn() +monoLeft = pipeline.create(dai.node.MonoCamera) +monoRight = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) +spatialLocationCalculator = pipeline.create(dai.node.SpatialLocationCalculator) + +xoutDepth = pipeline.create(dai.node.XLinkOut) +xoutSpatialData = pipeline.create(dai.node.XLinkOut) +xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn) xoutDepth.setStreamName("depth") xoutSpatialData.setStreamName("spatialData") diff --git a/examples/28_camera_video_example.py b/examples/28_camera_video_example.py index e7c949853..de68f2130 100644 --- a/examples/28_camera_video_example.py +++ b/examples/28_camera_video_example.py @@ -8,13 +8,13 @@ pipeline = dai.Pipeline() # Define a source - color camera -colorCam = pipeline.createColorCamera() +colorCam = pipeline.create(dai.node.ColorCamera) colorCam.setBoardSocket(dai.CameraBoardSocket.RGB) colorCam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) colorCam.setVideoSize(1920, 1080) # Create output -xoutVideo = pipeline.createXLinkOut() +xoutVideo = pipeline.create(dai.node.XLinkOut) xoutVideo.setStreamName("video") xoutVideo.input.setBlocking(False) xoutVideo.input.setQueueSize(1) diff --git a/examples/29_simple_script_camera_control.py b/examples/29_simple_script_camera_control.py index b30828c19..6b6f1a9db 100644 --- a/examples/29_simple_script_camera_control.py +++ b/examples/29_simple_script_camera_control.py @@ -11,7 +11,7 @@ cam = pipeline.create(dai.node.ColorCamera) # Script node -script = pipeline.create(dai.node.LxScript) +script = pipeline.create(dai.node.Script) script.setScriptData(""" import time ctrl = CameraControl() diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 086743fd7..41d021253 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -13,7 +13,7 @@ #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" -#include "depthai/pipeline/node/LxScript.hpp" +#include "depthai/pipeline/node/Script.hpp" #include "depthai/pipeline/node/SpatialLocationCalculator.hpp" #include "depthai/pipeline/node/SpatialDetectionNetwork.hpp" @@ -701,16 +701,17 @@ void NodeBindings::bind(pybind11::module& m){ .def("setRate", &SystemLogger::setRate, DOC(dai, node, SystemLogger, setRate)) ; - // LxScript node - ADD_NODE(LxScript) - .def_readonly("inputs", &LxScript::inputs) - .def_readonly("outputs", &LxScript::outputs) - .def("setName", &LxScript::setName) - .def("setScriptPath", &LxScript::setScriptPath, DOC(dai, node, LxScript, setScriptPath)) - .def("setScriptData", static_cast(&LxScript::setScriptData), py::arg("script"), DOC(dai, node, LxScript, setScriptData)) - .def("setScriptData", static_cast&)>(&LxScript::setScriptData), py::arg("data"), DOC(dai, node, LxScript, setScriptData, 2)) - .def("setProcessor", &LxScript::setProcessor, DOC(dai, node, LxScript, setProcessor)) - .def("getProcessor", &LxScript::getProcessor, DOC(dai, node, LxScript, getProcessor)) + // Script node + ADD_NODE(Script) + .def_readonly("inputs", &Script::inputs) + .def_readonly("outputs", &Script::outputs) + .def("setScriptPath", &Script::setScriptPath, DOC(dai, node, Script, setScriptPath)) + .def("setScriptData", static_cast(&Script::setScriptData), py::arg("script"), py::arg("name") = 
"", DOC(dai, node, Script, setScriptData)) + .def("setScriptData", static_cast&, const std::string&)>(&Script::setScriptData), py::arg("data"), py::arg("name") = "", DOC(dai, node, Script, setScriptData, 2)) + .def("getScriptPath", &Script::getScriptPath, DOC(dai, node, Script, getScriptPath)) + .def("getScriptName", &Script::getScriptName, DOC(dai, node, Script, getScriptName)) + .def("setProcessor", &Script::setProcessor, DOC(dai, node, Script, setProcessor)) + .def("getProcessor", &Script::getProcessor, DOC(dai, node, Script, getProcessor)) ; } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index cb10b652d..06b1cf3e1 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -16,7 +16,7 @@ #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" -#include "depthai/pipeline/node/LxScript.hpp" +#include "depthai/pipeline/node/Script.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" #include "depthai/pipeline/node/SpatialLocationCalculator.hpp" #include "depthai/pipeline/node/SpatialDetectionNetwork.hpp" @@ -82,24 +82,67 @@ void PipelineBindings::bind(pybind11::module& m){ return node; }) - // TODO(themarpe), deprecate in favor of 'create' - // templated create function - .def("createXLinkIn", &Pipeline::create) - .def("createXLinkOut", &Pipeline::create) - .def("createNeuralNetwork", &Pipeline::create) - .def("createColorCamera", &Pipeline::create) - .def("createVideoEncoder", &Pipeline::create) - .def("createSPIOut", &Pipeline::create) - .def("createImageManip", &Pipeline::create) - .def("createMonoCamera", &Pipeline::create) - .def("createStereoDepth", &Pipeline::create) - .def("createMobileNetDetectionNetwork", &Pipeline::create) - .def("createYoloDetectionNetwork", &Pipeline::create) - .def("createLxScript", &Pipeline::create) - .def("createSystemLogger", &Pipeline::create) - .def("createSpatialLocationCalculator", &Pipeline::create) - .def("createMobileNetSpatialDetectionNetwork", &Pipeline::create) - .def("createYoloSpatialDetectionNetwork", &Pipeline::create) + // DEPRECATED, use pipeline.create([class name]) + .def("createXLinkIn", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createXLinkOut", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createNeuralNetwork", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createColorCamera", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createVideoEncoder", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createSPIOut", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createImageManip", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createMonoCamera", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is 
deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createStereoDepth", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createMobileNetDetectionNetwork", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createYoloDetectionNetwork", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createSystemLogger", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createSpatialLocationCalculator", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createMobileNetSpatialDetectionNetwork", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) + .def("createYoloSpatialDetectionNetwork", [](Pipeline& p){ + PyErr_WarnEx(PyExc_DeprecationWarning, "create[Node]() is deprecated, use create([Node]) instead.", 1); + return p.create(); + }) ; } From be5c18d8b8bbf699cb234aba7d806a4d8f14866e Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 6 Apr 2021 14:18:02 +0200 Subject: [PATCH 11/40] Releasing GIL on long function calls --- depthai-core | 2 +- src/DeviceBindings.cpp | 41 +++++++++++++++++--------------- src/DeviceBootloaderBindings.cpp | 10 ++++---- 3 files changed, 28 insertions(+), 25 deletions(-) diff --git a/depthai-core b/depthai-core index 9c5b7fe8f..fde7fd8a5 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 9c5b7fe8f57e7c98d33dacbdbf9f1b460c226b8c +Subproject commit fde7fd8a5682ed1cc1afc6c700de8e5b6b13c81a diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 55c57d1cb..7df14e9bc 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -80,8 +80,11 @@ void DeviceBindings::bind(pybind11::module& m){ py::class_(m, "Device", DOC(dai, Device)) // Python only methods .def("__enter__", [](py::object obj){ return obj; }) - .def("__exit__", [](Device& d, py::object type, py::object value, py::object traceback) { d.close(); }) - .def("close", &Device::close, "Closes the connection to device. Better alternative is the usage of context manager: `with depthai.Device(pipeline) as device:`") + .def("__exit__", [](Device& d, py::object type, py::object value, py::object traceback) { + py::gil_scoped_release release; + d.close(); + }) + .def("close", [](Device& d) { py::gil_scoped_release release; d.close(); }, "Closes the connection to device. 
Better alternative is the usage of context manager: `with depthai.Device(pipeline) as device:`") //dai::Device methods //static @@ -111,8 +114,8 @@ void DeviceBindings::bind(pybind11::module& m){ return std::unique_ptr(new Device(pipeline, deviceInfo, pathToCmd)); }), py::arg("pipeline"), py::arg("deviceDesc"), py::arg("pathToCmd"), DOC(dai, Device, Device, 5)) - .def("isPipelineRunning", &Device::isPipelineRunning, DOC(dai, Device, isPipelineRunning)) - .def("startPipeline", &Device::startPipeline, DOC(dai, Device, startPipeline)) + .def("isPipelineRunning", [](Device& d) { py::gil_scoped_release release; return d.isPipelineRunning(); }, DOC(dai, Device, isPipelineRunning)) + .def("startPipeline", [](Device& d) { py::gil_scoped_release release; return d.startPipeline(); }, DOC(dai, Device, startPipeline)) .def("getOutputQueue", static_cast(Device::*)(const std::string&)>(&Device::getOutputQueue), py::arg("name"), DOC(dai, Device, getOutputQueue)) .def("getOutputQueue", static_cast(Device::*)(const std::string&, unsigned int, bool)>(&Device::getOutputQueue), py::arg("name"), py::arg("maxSize"), py::arg("blocking") = true, DOC(dai, Device, getOutputQueue, 2)) @@ -151,21 +154,21 @@ void DeviceBindings::bind(pybind11::module& m){ }, py::arg("timeout") = std::chrono::microseconds(-1), DOC(dai, Device, getQueueEvent, 4)) //.def("setCallback", DeviceWrapper::wrap(&Device::setCallback), py::arg("name"), py::arg("callback")) - .def("setLogLevel", &Device::setLogLevel, py::arg("level"), DOC(dai, Device, setLogLevel)) - .def("getLogLevel", &Device::getLogLevel, DOC(dai, Device, getLogLevel)) - .def("setSystemInformationLoggingRate", &Device::setSystemInformationLoggingRate, py::arg("rateHz"), DOC(dai, Device, setSystemInformationLoggingRate)) - .def("getSystemInformationLoggingRate", &Device::getSystemInformationLoggingRate, DOC(dai, Device, getSystemInformationLoggingRate)) - .def("getDdrMemoryUsage", &Device::getDdrMemoryUsage, DOC(dai, Device, getDdrMemoryUsage)) - .def("getCmxMemoryUsage", &Device::getCmxMemoryUsage, DOC(dai, Device, getCmxMemoryUsage)) - .def("getLeonCssHeapUsage", &Device::getLeonCssHeapUsage, DOC(dai, Device, getLeonCssHeapUsage)) - .def("getLeonMssHeapUsage", &Device::getLeonMssHeapUsage, DOC(dai, Device, getLeonMssHeapUsage)) - .def("getChipTemperature", &Device::getChipTemperature, DOC(dai, Device, getChipTemperature)) - .def("getLeonCssCpuUsage", &Device::getLeonCssCpuUsage, DOC(dai, Device, getLeonCssCpuUsage)) - .def("getLeonMssCpuUsage", &Device::getLeonMssCpuUsage, DOC(dai, Device, getLeonMssCpuUsage)) - .def("setLogOutputLevel", &Device::setLogOutputLevel, py::arg("level"), DOC(dai, Device, setLogOutputLevel)) - .def("getLogOutputLevel", &Device::getLogOutputLevel, DOC(dai, Device, getLogOutputLevel)) - .def("addLogCallback", &Device::addLogCallback, py::arg("callback"), DOC(dai, Device, addLogCallback)) - .def("removeLogCallback", &Device::removeLogCallback, py::arg("callbackId"), DOC(dai, Device, removeLogCallback)) + .def("setLogLevel", [](Device& d, LogLevel l) { py::gil_scoped_release release; d.setLogLevel(l); }, py::arg("level"), DOC(dai, Device, setLogLevel)) + .def("getLogLevel", [](Device& d) { py::gil_scoped_release release; return d.getLogLevel(); }, DOC(dai, Device, getLogLevel)) + .def("setSystemInformationLoggingRate", [](Device& d, float hz) { py::gil_scoped_release release; d.setSystemInformationLoggingRate(hz); }, py::arg("rateHz"), DOC(dai, Device, setSystemInformationLoggingRate)) + .def("getSystemInformationLoggingRate", [](Device& d) { 
py::gil_scoped_release release; return d.getSystemInformationLoggingRate(); }, DOC(dai, Device, getSystemInformationLoggingRate)) + .def("getDdrMemoryUsage", [](Device& d) { py::gil_scoped_release release; return d.getDdrMemoryUsage(); }, DOC(dai, Device, getDdrMemoryUsage)) + .def("getCmxMemoryUsage", [](Device& d) { py::gil_scoped_release release; return d.getCmxMemoryUsage(); }, DOC(dai, Device, getCmxMemoryUsage)) + .def("getLeonCssHeapUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonCssHeapUsage(); }, DOC(dai, Device, getLeonCssHeapUsage)) + .def("getLeonMssHeapUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonMssHeapUsage(); }, DOC(dai, Device, getLeonMssHeapUsage)) + .def("getChipTemperature", [](Device& d) { py::gil_scoped_release release; return d.getChipTemperature(); }, DOC(dai, Device, getChipTemperature)) + .def("getLeonCssCpuUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonCssCpuUsage(); }, DOC(dai, Device, getLeonCssCpuUsage)) + .def("getLeonMssCpuUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonMssCpuUsage(); }, DOC(dai, Device, getLeonMssCpuUsage)) + .def("setLogOutputLevel", [](Device& d, LogLevel l) { py::gil_scoped_release release; return d.setLogOutputLevel(l); }, py::arg("level"), DOC(dai, Device, setLogOutputLevel)) + .def("getLogOutputLevel", [](Device& d) { py::gil_scoped_release release; return d.getLogOutputLevel(); }, DOC(dai, Device, getLogOutputLevel)) + .def("addLogCallback", [](Device& d, std::function callback) { py::gil_scoped_release release; return d.addLogCallback(callback); }, py::arg("callback"), DOC(dai, Device, addLogCallback)) + .def("removeLogCallback", [](Device& d, int cbId) { py::gil_scoped_release release; return d.removeLogCallback(cbId); }, py::arg("callbackId"), DOC(dai, Device, removeLogCallback)) ; } \ No newline at end of file diff --git a/src/DeviceBootloaderBindings.cpp b/src/DeviceBootloaderBindings.cpp index f39de7f91..ba4a5da63 100644 --- a/src/DeviceBootloaderBindings.cpp +++ b/src/DeviceBootloaderBindings.cpp @@ -37,11 +37,11 @@ void DeviceBootloaderBindings::bind(pybind11::module& m){ .def(py::init(), py::arg("deviceDesc"), DOC(dai, DeviceBootloader, DeviceBootloader)) .def(py::init(), py::arg("deviceDesc"), py::arg("pathToCmd"), DOC(dai, DeviceBootloader, DeviceBootloader, 2)) - .def("flash", &DeviceBootloader::flash, py::arg("progressCallback"), py::arg("pipeline"), DOC(dai, DeviceBootloader, flash)) - .def("flashDepthaiApplicationPackage", &DeviceBootloader::flashDepthaiApplicationPackage, py::arg("progressCallback"), py::arg("package"), DOC(dai, DeviceBootloader, flashDepthaiApplicationPackage)) - .def("flashBootloader", &DeviceBootloader::flashBootloader, py::arg("progressCallback"), py::arg("path") = "", DOC(dai, DeviceBootloader, flashBootloader)) - .def("getVersion", &DeviceBootloader::getVersion, DOC(dai, DeviceBootloader, getVersion)) - .def("isEmbeddedVersion", &DeviceBootloader::isEmbeddedVersion, DOC(dai, DeviceBootloader, isEmbeddedVersion)) + .def("flash", [](DeviceBootloader& db, std::function progressCallback, Pipeline& pipeline) { py::gil_scoped_release release; return db.flash(progressCallback, pipeline); }, py::arg("progressCallback"), py::arg("pipeline"), DOC(dai, DeviceBootloader, flash)) + .def("flashDepthaiApplicationPackage", [](DeviceBootloader& db, std::function progressCallback, std::vector package) { py::gil_scoped_release release; return db.flashDepthaiApplicationPackage(progressCallback, package); }, 
py::arg("progressCallback"), py::arg("package"), DOC(dai, DeviceBootloader, flashDepthaiApplicationPackage)) + .def("flashBootloader", [](DeviceBootloader& db, std::function progressCallback, std::string path) { py::gil_scoped_release release; return db.flashBootloader(progressCallback, path); }, py::arg("progressCallback"), py::arg("path") = "", DOC(dai, DeviceBootloader, flashBootloader)) + .def("getVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.getVersion(); }, DOC(dai, DeviceBootloader, getVersion)) + .def("isEmbeddedVersion", [](DeviceBootloader& db) { py::gil_scoped_release release; return db.isEmbeddedVersion(); }, DOC(dai, DeviceBootloader, isEmbeddedVersion)) ; } \ No newline at end of file From 9873ceea8674755dff68eba1a8970204bde3591b Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Wed, 7 Apr 2021 16:14:56 -0600 Subject: [PATCH 12/40] Updating firmware and adding a check to raw PoBuf parsing. --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index fde7fd8a5..55033f23a 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit fde7fd8a5682ed1cc1afc6c700de8e5b6b13c81a +Subproject commit 55033f23a5067a67cc2c4640ca701011a02bb73a From 32b38e58faf6e597cef9068cc0ad40a635e15313 Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Fri, 9 Apr 2021 15:04:56 -0600 Subject: [PATCH 13/40] Update firmware (bug with multiple scripting nodes). --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 55033f23a..9e739d88f 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 55033f23a5067a67cc2c4640ca701011a02bb73a +Subproject commit 9e739d88f649c363c7da98a4dac52250d6ff8619 From 8c8e75614bebfab6803298fd66dcede376b849ed Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 13 Apr 2021 23:47:02 +0200 Subject: [PATCH 14/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 9e739d88f..46d0d9123 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 9e739d88f649c363c7da98a4dac52250d6ff8619 +Subproject commit 46d0d912319e3e49bad14d6623574bbf032bbfb3 From 8c9d95ed727bd2efa81c5a3b7915a534994a6a5a Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Wed, 14 Apr 2021 09:10:45 -0600 Subject: [PATCH 15/40] Updating firmware (Fixing datetime on ImgFrame::getTimestamp) --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 46d0d9123..aae696a3d 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 46d0d912319e3e49bad14d6623574bbf032bbfb3 +Subproject commit aae696a3dbed018f1ac927ce5c27c63e4f9db72e From ee51e73f09923712cf054e9b21d7cbce125eff18 Mon Sep 17 00:00:00 2001 From: Jon Ngai Date: Wed, 28 Apr 2021 14:49:45 -0600 Subject: [PATCH 16/40] Changes to get SPIIn working (WIP) --- CMakeLists.txt | 5 +++++ depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 7 +++++++ src/pipeline/PipelineBindings.cpp | 2 ++ 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index aacf6cbf5..09c3e2569 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,5 +1,10 @@ cmake_minimum_required(VERSION 3.4) # For Hunter +set(PYTHON_EXECUTABLE "/usr/bin/python3.6") +set(PYTHON_INCLUDE_DIR "/usr/include/python3.6") +set(PYTHON_LIBRARY "/usr/lib/x86_64-linux-gnu/libpython3.6.so") + + # Set defaults # PIC toolchain as we are building a shared library 
set(CMAKE_TOOLCHAIN_FILE "${CMAKE_CURRENT_LIST_DIR}/cmake/toolchain/pic.cmake" CACHE STRING "") diff --git a/depthai-core b/depthai-core index bffa915bd..e251fe9ba 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit bffa915bdaac684511c80c52f9fcc5c69f2271cd +Subproject commit e251fe9ba538c6f9e4246bd142c278fe2ee5e320 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 68fc59723..489096980 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -10,6 +10,7 @@ #include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai/pipeline/node/ImageManip.hpp" #include "depthai/pipeline/node/SPIOut.hpp" +#include "depthai/pipeline/node/SPIIn.hpp" #include "depthai/pipeline/node/DetectionNetwork.hpp" #include "depthai/pipeline/node/SystemLogger.hpp" #include "depthai/pipeline/node/SpatialLocationCalculator.hpp" @@ -369,6 +370,12 @@ void NodeBindings::bind(pybind11::module& m){ .def("setStreamName", &SPIOut::setStreamName, py::arg("name"), DOC(dai, node, SPIOut, setStreamName)) .def("setBusId", &SPIOut::setBusId, py::arg("id"), DOC(dai, node, SPIOut, setBusId)) ; + // SPIIn node + py::class_>(m, "SPIIn", DOC(dai, node, SPIIn)) + .def_readonly("out", &SPIIn::out, DOC(dai, node, SPIIn, input)) + .def("setStreamName", &SPIIn::setStreamName, py::arg("name"), DOC(dai, node, SPIIn, setStreamName)) + .def("setBusId", &SPIIn::setBusId, py::arg("id"), DOC(dai, node, SPIIn, setBusId)) + ; py::class_>(m, "DetectionNetwork", DOC(dai, node, DetectionNetwork)) .def_readonly("input", &DetectionNetwork::input, DOC(dai, node, DetectionNetwork, input)) diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 8d2f8ec09..81b7c2663 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -10,6 +10,7 @@ #include "depthai/pipeline/node/ColorCamera.hpp" #include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai/pipeline/node/SPIOut.hpp" +#include "depthai/pipeline/node/SPIIn.hpp" #include "depthai/pipeline/node/ImageManip.hpp" #include "depthai/pipeline/node/MonoCamera.hpp" #include "depthai/pipeline/node/StereoDepth.hpp" @@ -66,6 +67,7 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createColorCamera", &Pipeline::create) .def("createVideoEncoder", &Pipeline::create) .def("createSPIOut", &Pipeline::create) + .def("createSPIIn", &Pipeline::create) .def("createImageManip", &Pipeline::create) .def("createMonoCamera", &Pipeline::create) .def("createStereoDepth", &Pipeline::create) From 06de59f900273610de673c5152082ea589fa1e9c Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 18 May 2021 03:13:37 +0200 Subject: [PATCH 17/40] Fixed some SPIIn documentation --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/depthai-core b/depthai-core index 0761d575f..6bb99cf93 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 0761d575fa6a367084089f568a16e67717a01d24 +Subproject commit 6bb99cf93699ab9940beabe2f950656de2e7c602 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index fb17e56a4..1ff1a62ed 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -373,7 +373,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // SPIIn node py::class_>(m, "SPIIn", DOC(dai, node, SPIIn)) - .def_readonly("out", &SPIIn::out, DOC(dai, node, SPIIn, input)) + .def_readonly("out", &SPIIn::out, DOC(dai, node, SPIIn, out)) .def("setStreamName", 
&SPIIn::setStreamName, py::arg("name"), DOC(dai, node, SPIIn, setStreamName)) .def("setBusId", &SPIIn::setBusId, py::arg("id"), DOC(dai, node, SPIIn, setBusId)) ; From 850ca14100b773b859986fb662fdcc887082047d Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 18 May 2021 14:02:54 +0200 Subject: [PATCH 18/40] CI fix for docstring build --- .github/workflows/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 4d294de5a..76088de05 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,7 +48,7 @@ jobs: sudo apt install libusb-1.0-0-dev python -m pip install -r docs/requirements_mkdoc.txt - name: Configure project - run: cmake -S . -B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" + run: cmake -S . -B build -DPYBIND11_FINDPYTHON=ON -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" - name: Build target 'pybind11_mkdoc' run: cmake --build build --target pybind11_mkdoc --parallel 8 - name: Upload docstring artifacts From f315effd6a7011d247971d09ce3a79cc5f7aca2c Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 18 May 2021 14:13:46 +0200 Subject: [PATCH 19/40] Removed temporary development settings --- CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c5bfb34bc..109b64718 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,10 +1,5 @@ cmake_minimum_required(VERSION 3.4) # For Hunter -set(PYTHON_EXECUTABLE "/usr/bin/python3.6") -set(PYTHON_INCLUDE_DIR "/usr/include/python3.6") -set(PYTHON_LIBRARY "/usr/lib/x86_64-linux-gnu/libpython3.6.so") - - # Set defaults # PIC toolchain as we are building a shared library set(CMAKE_TOOLCHAIN_FILE "${CMAKE_CURRENT_LIST_DIR}/cmake/toolchain/pic.cmake" CACHE STRING "") From 64ef69c37ccccc2cc3acae01e1f916155e1ddd8a Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Tue, 18 May 2021 14:23:39 +0200 Subject: [PATCH 20/40] Reverted CI docstring changes --- .github/workflows/main.yml | 2 +- depthai-core | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 76088de05..4d294de5a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -48,7 +48,7 @@ jobs: sudo apt install libusb-1.0-0-dev python -m pip install -r docs/requirements_mkdoc.txt - name: Configure project - run: cmake -S . -B build -DPYBIND11_FINDPYTHON=ON -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" + run: cmake -S . 
-B build -DDEPTHAI_PYTHON_FORCE_DOCSTRINGS=ON -DDEPTHAI_PYTHON_DOCSTRINGS_OUTPUT="$PWD/docstrings/depthai_python_docstring.hpp" - name: Build target 'pybind11_mkdoc' run: cmake --build build --target pybind11_mkdoc --parallel 8 - name: Upload docstring artifacts diff --git a/depthai-core b/depthai-core index 6bb99cf93..ecfab8d04 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 6bb99cf93699ab9940beabe2f950656de2e7c602 +Subproject commit ecfab8d044bac9e687b68eb00d9962b1c48678a5 From c90abbb7634da0042375560362d1c30322634101 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 11 Jun 2021 17:48:18 +0200 Subject: [PATCH 21/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 530d6ab62..37fd24f3b 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 530d6ab626ca4f7de3a50a027239a7759a478441 +Subproject commit 37fd24f3beddb0bbf904acb09f7966a240ba67e8 From 761d5e600ceebd0a516e4466800313fae22a7ffa Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 24 Jun 2021 16:42:38 +0200 Subject: [PATCH 22/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index b04dcc1b7..2b9d79c34 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit b04dcc1b7d643e93924a6a2dcba8a212e60ba660 +Subproject commit 2b9d79c34b68a44a79d856477925caaca9eeb4f7 From 7baf504970e45463bba90ef687a27f769d29bb8d Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Fri, 25 Jun 2021 15:12:50 +0300 Subject: [PATCH 23/40] Update core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 2b9d79c34..6f21c8976 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 2b9d79c34b68a44a79d856477925caaca9eeb4f7 +Subproject commit 6f21c8976b8601eaa4205ffffde7683098ccfa1f From c30993dd7191e9d24f19c600feae62d733a4f19f Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sat, 26 Jun 2021 02:47:16 +0200 Subject: [PATCH 24/40] Updated core with Script improvements --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 2b9d79c34..d2833417c 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 2b9d79c34b68a44a79d856477925caaca9eeb4f7 +Subproject commit d2833417cde77b7c4cdf6e91794b60e72e22352b From 2fcf18a624a8ffdf69589a554d4a4642f8fddc2b Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Thu, 15 Jul 2021 20:08:52 +0200 Subject: [PATCH 25/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 799c56ac1..540b3ca32 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 799c56ac1712151b42f72ddf3b64a557f447991a +Subproject commit 540b3ca32ef212c37040390d3ea9ade76d183b57 From eb250942099c1914206f0602403dee154261ea2e Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 16 Jul 2021 15:35:58 +0200 Subject: [PATCH 26/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 540b3ca32..bca935bf8 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 540b3ca32ef212c37040390d3ea9ade76d183b57 +Subproject commit bca935bf8aa5c3f67572d9f9cc9b851e4f6d1724 From 4ad4bd8b69bedb5514a2977091fbbc053a85ce8e Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 16 Jul 2021 22:49:49 +0200 Subject: [PATCH 27/40] Updated core --- 
depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index bca935bf8..ae9be55db 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit bca935bf8aa5c3f67572d9f9cc9b851e4f6d1724 +Subproject commit ae9be55db4b10b044004a57f39c259e448e40bde From 2068884ec9e39ef55d0011ab8834d6e317ea8a88 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 19 Jul 2021 19:16:56 +0200 Subject: [PATCH 28/40] Updated EdgeDetector docs --- docs/source/components/nodes/edge_detector.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/components/nodes/edge_detector.rst b/docs/source/components/nodes/edge_detector.rst index 6c30c0528..d847de9e0 100644 --- a/docs/source/components/nodes/edge_detector.rst +++ b/docs/source/components/nodes/edge_detector.rst @@ -11,7 +11,7 @@ How to place it .. code-tab:: py pipeline = dai.Pipeline() - edgeDetector = pipeline.createEdgeDetector() + edgeDetector = pipeline.create(dai.node.EdgeDetector) .. code-tab:: c++ @@ -48,7 +48,7 @@ Usage .. code-tab:: py pipeline = dai.Pipeline() - edgeDetector = pipeline.createEdgeDetector() + edgeDetector = pipeline.create(dai.node.EdgeDetector) sobelHorizontalKernel = [[1, 0, -1], [2, 0, -2], [1, 0, -1]] sobelVerticalKernel = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]] @@ -75,7 +75,7 @@ Reference .. tab:: Python - .. autoclass:: depthai.EdgeDetector + .. autoclass:: depthai.node.EdgeDetector :members: :inherited-members: :noindex: From 6c5524912bf48bf8e44ce20441c614d5544e3433 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Mon, 19 Jul 2021 21:56:36 +0200 Subject: [PATCH 29/40] Updated core --- depthai-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index 0cc861fc2..1991185f7 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 0cc861fc29bc21e1df420a473a9cf21e6e87ecb2 +Subproject commit 1991185f7c7a8885bf6ab8ef2382910670133146 From 62faa233a290a7cbd701e09aa590f273867db67a Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Wed, 21 Jul 2021 11:24:05 +0200 Subject: [PATCH 30/40] Updated pybind11 to 2.7.0, modifed example and fixed some bindings --- cmake/Hunter/config.cmake | 8 ++++---- depthai-core | 2 +- examples/CMakeLists.txt | 2 +- examples/script_camera_control.py | 20 +++++++++----------- src/DeviceBindings.cpp | 8 ++++++-- src/pipeline/NodeBindings.cpp | 4 ++-- 6 files changed, 23 insertions(+), 21 deletions(-) diff --git a/cmake/Hunter/config.cmake b/cmake/Hunter/config.cmake index f74300ab0..396b38b5b 100644 --- a/cmake/Hunter/config.cmake +++ b/cmake/Hunter/config.cmake @@ -1,7 +1,7 @@ -# Temporary pybind11 2.6.3dev1 chrono bindings patch +# Pybind11 2.7.0 hunter_config( pybind11 - VERSION "2.6.3dev1" - URL "https://github.com/pybind/pybind11/archive/54430436fee2afc4f8443691075a6208f9ea8eba.tar.gz" - SHA1 "c8550f7d77e92045c996d17f1d214223d1e2e620" + VERSION "2.7.0" + URL "https://github.com/pybind/pybind11/archive/refs/tags/v2.7.0.tar.gz" + SHA1 "3a7010e5952c56e08c8f9b7d6fb458a173fd585a" ) diff --git a/depthai-core b/depthai-core index 1077cb17d..25aa74f6d 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 1077cb17db2f2c40921bfd4b70e98899d2883d49 +Subproject commit 25aa74f6de9876dadfc55c748d22f44601087f41 diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 995f71ebd..dd2ec34a6 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -53,7 +53,7 @@ function(add_python_example example_name python_script_path) 
"PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" # ASAN in case of sanitizers ${ASAN_ENVIRONMENT_VARS} - ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=5 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake + ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=10 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake # Actual script to run ${PYTHON_EXECUTABLE} -Werror "${CMAKE_CURRENT_LIST_DIR}/${python_script_path}" ${arguments} ) diff --git a/examples/script_camera_control.py b/examples/script_camera_control.py index 07f7cded3..f0044bb90 100644 --- a/examples/script_camera_control.py +++ b/examples/script_camera_control.py @@ -10,13 +10,13 @@ # Script node script = pipeline.create(dai.node.Script) -script.setScriptData(""" -import time -ctrl = CameraControl() -ctrl.setCaptureStill(True) -while True: - time.sleep(1) - node.io['out'].send(ctrl) +script.setScript(""" + import time + ctrl = CameraControl() + ctrl.setCaptureStill(True) + while True: + time.sleep(1) + node.io['out'].send(ctrl) """) # XLinkOut @@ -27,12 +27,10 @@ script.outputs['out'].link(cam.inputControl) cam.still.link(xout.input) -# Connect to device and start pipeline +# Connect to device with pipeline with dai.Device(pipeline) as device: - # Start pipeline - device.startPipeline() while True: img = device.getOutputQueue("still").get() cv2.imshow('still', img.getCvFrame()) if cv2.waitKey(1) == ord('q'): - exit(0) + break diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 589b1b1c0..dcf6e782b 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -229,6 +229,12 @@ void DeviceBindings::bind(pybind11::module& m){ }, py::arg("timeout") = std::chrono::microseconds(-1), DOC(dai, Device, getQueueEvent, 4)) //.def("setCallback", DeviceWrapper::wrap(&Device::setCallback), py::arg("name"), py::arg("callback")) + + // Doesn't require GIL release (eg, don't do RPC or long blocking things in background) + .def("setLogOutputLevel", &Device::setLogOutputLevel, py::arg("level"), DOC(dai, Device, setLogOutputLevel)) + .def("getLogOutputLevel", &Device::getLogOutputLevel, DOC(dai, Device, getLogOutputLevel)) + + // Requires GIL release .def("setLogLevel", [](Device& d, LogLevel l) { py::gil_scoped_release release; d.setLogLevel(l); }, py::arg("level"), DOC(dai, Device, setLogLevel)) .def("getLogLevel", [](Device& d) { py::gil_scoped_release release; return d.getLogLevel(); }, DOC(dai, Device, getLogLevel)) .def("setSystemInformationLoggingRate", [](Device& d, float hz) { py::gil_scoped_release release; d.setSystemInformationLoggingRate(hz); }, py::arg("rateHz"), DOC(dai, Device, setSystemInformationLoggingRate)) @@ -241,8 +247,6 @@ void DeviceBindings::bind(pybind11::module& m){ .def("getChipTemperature", [](Device& d) { py::gil_scoped_release release; return d.getChipTemperature(); }, DOC(dai, Device, getChipTemperature)) .def("getLeonCssCpuUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonCssCpuUsage(); }, DOC(dai, Device, getLeonCssCpuUsage)) .def("getLeonMssCpuUsage", [](Device& d) { py::gil_scoped_release release; return d.getLeonMssCpuUsage(); }, DOC(dai, Device, getLeonMssCpuUsage)) - .def("setLogOutputLevel", [](Device& d, LogLevel l) { py::gil_scoped_release release; return d.setLogOutputLevel(l); }, py::arg("level"), DOC(dai, Device, setLogOutputLevel)) - .def("getLogOutputLevel", [](Device& d) { py::gil_scoped_release release; return d.getLogOutputLevel(); }, DOC(dai, Device, getLogOutputLevel)) .def("addLogCallback", [](Device& d, std::function callback) { py::gil_scoped_release release; 
return d.addLogCallback(callback); }, py::arg("callback"), DOC(dai, Device, addLogCallback)) .def("removeLogCallback", [](Device& d, int cbId) { py::gil_scoped_release release; return d.removeLogCallback(cbId); }, py::arg("callbackId"), DOC(dai, Device, removeLogCallback)) .def("getUsbSpeed", [](Device& d) { py::gil_scoped_release release; return d.getUsbSpeed(); }, DOC(dai, Device, getUsbSpeed)) diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 2021ec2d1..a353475bd 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -902,8 +902,8 @@ void NodeBindings::bind(pybind11::module& m){ .def_readonly("inputs", &Script::inputs) .def_readonly("outputs", &Script::outputs) .def("setScriptPath", &Script::setScriptPath, DOC(dai, node, Script, setScriptPath)) - .def("setScriptData", static_cast(&Script::setScriptData), py::arg("script"), py::arg("name") = "", DOC(dai, node, Script, setScriptData)) - .def("setScriptData", static_cast&, const std::string&)>(&Script::setScriptData), py::arg("data"), py::arg("name") = "", DOC(dai, node, Script, setScriptData, 2)) + .def("setScript", py::overload_cast(&Script::setScript), py::arg("script"), py::arg("name") = "", DOC(dai, node, Script, setScript)) + .def("setScript", py::overload_cast&, const std::string&>(&Script::setScript), py::arg("data"), py::arg("name") = "", DOC(dai, node, Script, setScript, 2)) .def("getScriptPath", &Script::getScriptPath, DOC(dai, node, Script, getScriptPath)) .def("getScriptName", &Script::getScriptName, DOC(dai, node, Script, getScriptName)) .def("setProcessor", &Script::setProcessor, DOC(dai, node, Script, setProcessor)) From eba48706939518c705b5976abbef90cbb734a68a Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 22 Jul 2021 17:17:06 +0200 Subject: [PATCH 31/40] Added script node docs --- docs/source/components/nodes/script.rst | 120 ++++++++++++++++++ docs/source/prerelease/script.txt | 56 -------- docs/source/samples/script_camera_control.rst | 42 ++++++ docs/source/tutorials/code_samples.rst | 1 + docs/source/tutorials/simple_samples.rst | 1 + 5 files changed, 164 insertions(+), 56 deletions(-) create mode 100644 docs/source/components/nodes/script.rst delete mode 100644 docs/source/prerelease/script.txt create mode 100644 docs/source/samples/script_camera_control.rst diff --git a/docs/source/components/nodes/script.rst b/docs/source/components/nodes/script.rst new file mode 100644 index 000000000..e31652e7f --- /dev/null +++ b/docs/source/components/nodes/script.rst @@ -0,0 +1,120 @@ +Script +====== + +Script node allows users to run **custom Python scripts on the device**. Due to the computational resource constraints, +script node shouldn't be used for heavy computing (eg. image manipulation/CV), but for managing the flow +of the pipeline. Example use cases would be controlling nodes like :ref:`ImageManip`, :ref:`ColorCamera`, :ref:`SpatialLocationCalculator`, +decoding :ref:`NeuralNetwork` results, or interfacing with GPIOs. + +How to place it +############### + +.. tabs:: + + .. code-tab:: py + + pipeline = dai.Pipeline() + script = pipeline.create(dai.node.Script) + + .. code-tab:: c++ + + dai::Pipeline pipeline; + auto script = pipeline.create(); + + +Inputs and Outputs +################## + +.. code-block:: + + ┌──────────────┐ + │ │ + inputs[] │ │ outputs[] + ---------►│ Script ├-----------► + │ │ + │ │ + └──────────────┘ + +Users can define as many inputs and outputs as they need. Inputs and outputs can be any :ref:`Message ` type. + +Usage +##### + +.. 
tabs:: + + .. code-tab:: py + + script = pipeline.create(dai.node.Script) + script.setScript(""" + import time + import marshal + num = 123 + node.warn(f"Number {num}") # Print to host + x = [1, "Hello", {"Foo": "Bar"}] + x_serial = marshal.dumps(x) + b = Buffer(len(x_serial)) + while True: + time.sleep(1) + b.getData()[:] = x_serial + node.io['out'].send(b) + """) + script.outputs['out'].link(xout.input) + + # ... + # After initializing the device, enable log levels + device.setLogLevel(dai.LogLevel.WARN) + device.setLogOutputLevel(dai.LogLevel.WARN) + + .. code-tab:: c++ + + auto script = pipeline.create(); + script->setScript(R"( + import time + import marshal + num = 123 + node.warn(f"Number {num}") # Print to host + x = [1, "Hello", {"Foo": "Bar"}] + x_serial = marshal.dumps(x) + b = Buffer(len(x_serial)) + while True: + time.sleep(1) + b.getData()[:] = x_serial + node.io['out'].send(b) + )"); + script->outputs["out"].link(xout->input); + + // ... + // After initializing the device, enable log levels + device.setLogLevel(dai::LogLevel.WARN); + device.setLogOutputLevel(dai::LogLevel.WARN); + + + +Examples of functionality +######################### + +- :ref:`Script camera control` +- `Triangulation experiment `__ +- `Movenet decoding (edge mode) `__ - A bit more complex example by geaxgx + +Reference +######### + +.. tabs:: + + .. tab:: Python + + .. autoclass:: depthai.node.Script + :members: + :inherited-members: + :noindex: + + .. tab:: C++ + + .. doxygenclass:: dai::node::Script + :project: depthai-core + :members: + :private-members: + :undoc-members: + +.. include:: ../../includes/footer-short.rst diff --git a/docs/source/prerelease/script.txt b/docs/source/prerelease/script.txt deleted file mode 100644 index b375b7370..000000000 --- a/docs/source/prerelease/script.txt +++ /dev/null @@ -1,56 +0,0 @@ -Script -====== - -Scripting node enables users to write scripts that will run on the device. - -How to place it -############### - -.. tabs:: - - .. code-tab:: py - - pipeline = dai.Pipeline() - script = pipeline.create(dai.node.Script) - - .. code-tab:: c++ - - dai::Pipeline pipeline; - auto script = pipeline.create(); - -Demo -#### - -.. code-block: python - - feed_manip_config_script = pipeline.create(dai.node.Script) - feed_manip_config_script.setScriptData(""" - score, bb_cx, bb_cy, bb_w, rect_cx, rect_cy, rect_w, rotation = node.io['in'].get().getLayerFp16("result") - rr = RotatedRect() - rr.center.x = rect_cx - rr.center.y = rect_cy - rr.size.width = rect_w - rr.size.height = rect_w - rr.angle = rotation - cfg = ImageManipConfig() - cfg.setCropRotatedRect(rr, True) - cfg.setResize(224, 224) - node.io['out'].send(cfg) - """) - pp_nn.out.link(feed_manip_config_script.inputs['in']) - feed_manip_config_script.outputs['out'].link(pre_lm_manip.inputConfig) - -.. code-block: python - - script = pipeline.create(dai.node.Script) - script.setScriptData(""" - # Logging to the host can be enable with - # node.trace, node.debug, node.warn, node.error, node.critical - node.trace("Hello World") - """) - - # After initializing the device, enable log levels - device.setLogLevel(dai.LogLevel.WARN) - device.setLogOutputLevel(dai.LogLevel.WARN) - -.. 
include:: ../../includes/footer-short.rst diff --git a/docs/source/samples/script_camera_control.rst b/docs/source/samples/script_camera_control.rst new file mode 100644 index 000000000..01672c55a --- /dev/null +++ b/docs/source/samples/script_camera_control.rst @@ -0,0 +1,42 @@ +Script camera control +===================== + +This example shows how to use :ref:`Script` node. It controls the :ref:`ColorCamera` to +capture a still image every second. + +Demo +#### + +.. raw:: html + +
+ +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../examples/script_camera_control.py + :language: python + :linenos: + + .. tab:: C++ + + Also `available on GitHub `__ + + .. literalinclude:: ../../../depthai-core/examples/src/script_camera_control.cpp + :language: cpp + :linenos: + +.. include:: /includes/footer-short.rst diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst index 8cb2bcd17..416e91e97 100644 --- a/docs/source/tutorials/code_samples.rst +++ b/docs/source/tutorials/code_samples.rst @@ -30,6 +30,7 @@ Code samples are used for automated testing. They are also a great starting poin - :ref:`IMU Accelerometer & Gyroscope` - Accelerometer and gyroscope at 500hz rate - :ref:`IMU Rotation Vector` - Rotation vector at 400 hz rate - :ref:`Edge detector` - Edge detection on input frame +- :ref:`Script camera control` - Controlling the camera with the Script node .. rubric:: Complex diff --git a/docs/source/tutorials/simple_samples.rst b/docs/source/tutorials/simple_samples.rst index 80c87b185..69853f745 100644 --- a/docs/source/tutorials/simple_samples.rst +++ b/docs/source/tutorials/simple_samples.rst @@ -22,6 +22,7 @@ Simple ../samples/imu_accelerometer_gyroscope.rst ../samples/imu_rotation_vector.rst ../samples/edge_detector.rst + ../samples/script_camera_control.rst These samples are great starting point for the gen2 API. From c14009591e5be063b23a545f434aedef40f4a133 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Thu, 22 Jul 2021 17:20:36 +0200 Subject: [PATCH 32/40] Added demo gif --- docs/source/samples/script_camera_control.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/samples/script_camera_control.rst b/docs/source/samples/script_camera_control.rst index 01672c55a..aec697376 100644 --- a/docs/source/samples/script_camera_control.rst +++ b/docs/source/samples/script_camera_control.rst @@ -10,7 +10,7 @@ Demo .. raw:: html
Setup From 061b47d5220892b849de1b5ec04b31dd342be0c0 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Fri, 23 Jul 2021 14:40:55 +0200 Subject: [PATCH 33/40] Added bindings for properties --- depthai-core | 2 +- src/pipeline/NodeBindings.cpp | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/depthai-core b/depthai-core index bcc7e6eff..a8e5de844 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit bcc7e6efffb741bc71c67353c06cbd0f8be80f86 +Subproject commit a8e5de84437b862cc72f62483a0a9a6abd162da5 diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index a84a832f7..cf5876b67 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -444,6 +444,24 @@ void NodeBindings::bind(pybind11::module& m){ .def("setBusId", &SPIIn::setBusId, py::arg("id"), DOC(dai, node, SPIIn, setBusId)) ; + py::class_ spiOutProperties(m, "SPIOutProperties", DOC(dai, SPIOutProperties)); + spiOutProperties + .def_readwrite("streamName", &SPIOutProperties::streamName) + .def_readwrite("busId", &SPIOutProperties::busId) + ; + // ALIAS + m.attr("SPIOut").attr("Properties") = spiOutProperties; + + py::class_ spiInProperties(m, "SPIInProperties", DOC(dai, SPIInProperties)); + spiInProperties + .def_readwrite("streamName", &SPIInProperties::streamName) + .def_readwrite("busId", &SPIInProperties::busId) + .def_readwrite("maxDataSize", &SPIInProperties::maxDataSize) + .def_readwrite("numFrames", &SPIInProperties::numFrames) + ; + // ALIAS + m.attr("SPIIn").attr("Properties") = spiInProperties; + py::class_>(m, "DetectionNetwork", DOC(dai, node, DetectionNetwork)) .def_readonly("input", &DetectionNetwork::input, DOC(dai, node, DetectionNetwork, input)) .def_readonly("out", &DetectionNetwork::out, DOC(dai, node, DetectionNetwork, out)) From bfd8922f0428141d6eb46be847aa5c0221affe1b Mon Sep 17 00:00:00 2001 From: Erol444 Date: Fri, 23 Jul 2021 19:07:46 +0200 Subject: [PATCH 34/40] Updated script node docs - PR review suggestions --- docs/source/components/nodes/script.rst | 40 +++++++++++++++++++++---- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/docs/source/components/nodes/script.rst b/docs/source/components/nodes/script.rst index e31652e7f..d88916181 100644 --- a/docs/source/components/nodes/script.rst +++ b/docs/source/components/nodes/script.rst @@ -27,12 +27,12 @@ Inputs and Outputs .. code-block:: - ┌──────────────┐ - │ │ - inputs[] │ │ outputs[] - ---------►│ Script ├-----------► - │ │ - │ │ + inputs[] ┌──────────────┐ outputs[] + ---------►│ ├-----------► + ---------►│ ├-----------► + ... │ Script | ... + ... │ │ ... + ---------►│ ├-----------► └──────────────┘ Users can define as many inputs and outputs as they need. Inputs and outputs can be any :ref:`Message ` type. @@ -88,6 +88,34 @@ Usage device.setLogLevel(dai::LogLevel.WARN); device.setLogOutputLevel(dai::LogLevel.WARN); +Interfacing with GPIOs +###################### + +In the script node you can interface with GPIOs of the VPU. Currently supported functions are: + +.. code-block:: python + + import GPIO # module + GPIO.read(pin) + GPIO.write(pin, value) + GPIO.setPwm(pin, highCount, lowCount, repeat=0) # repeat == 0 means indefinite + GPIO.enablePwm(pin, enable) + +Using DepthAI :ref:`Messages ` +################################################### + +The depthai module is implicitly imported to the script node. You can create new +depthai messages and assign data to it, for example: + +.. 
code-block:: python + + buf = Buffer(100) # Assign 100 bytes to the Buffer message + + # Create CameraControl message, set manual focus + control = CameraControl() + control.setManualFocus(100) + + imgFrame = ImgFrame(300*300*3) # Buffer with 300x300x3 bytes Examples of functionality From f945b2f1dd977bef508659d7589a920ae47a6f02 Mon Sep 17 00:00:00 2001 From: TheMarpe Date: Sat, 24 Jul 2021 01:19:13 +0200 Subject: [PATCH 35/40] Added a callstack and refactored all bindings to use two stage binding --- src/CalibrationHandlerBindings.cpp | 21 ++- src/CalibrationHandlerBindings.hpp | 2 +- src/DataQueueBindings.cpp | 27 +++- src/DataQueueBindings.hpp | 2 +- src/DatatypeBindings.cpp | 217 +++++++++++++++++--------- src/DatatypeBindings.hpp | 2 +- src/DeviceBindings.cpp | 21 ++- src/DeviceBindings.hpp | 2 +- src/DeviceBootloaderBindings.cpp | 31 +++- src/DeviceBootloaderBindings.hpp | 2 +- src/XLinkConnectionBindings.cpp | 37 ++++- src/XLinkConnectionBindings.hpp | 2 +- src/log/LogBindings.cpp | 23 ++- src/log/LogBindings.hpp | 2 +- src/openvino/OpenVINOBindings.cpp | 26 ++- src/openvino/OpenVINOBindings.hpp | 2 +- src/pipeline/AssetManagerBindings.cpp | 27 +++- src/pipeline/AssetManagerBindings.hpp | 2 +- src/pipeline/CommonBindings.cpp | 66 ++++++-- src/pipeline/CommonBindings.hpp | 2 +- src/pipeline/NodeBindings.cpp | 197 ++++++++++++++--------- src/pipeline/NodeBindings.hpp | 2 +- src/pipeline/PipelineBindings.cpp | 25 ++- src/pipeline/PipelineBindings.hpp | 2 +- src/py_bindings.cpp | 31 ++-- src/pybind11_common.hpp | 3 + src/utility/ResourcesBindings.cpp | 4 +- src/utility/ResourcesBindings.hpp | 2 +- 28 files changed, 552 insertions(+), 230 deletions(-) diff --git a/src/CalibrationHandlerBindings.cpp b/src/CalibrationHandlerBindings.cpp index 0b0a4a578..b634aeccb 100644 --- a/src/CalibrationHandlerBindings.cpp +++ b/src/CalibrationHandlerBindings.cpp @@ -3,12 +3,27 @@ #include "depthai-shared/common/Point2f.hpp" #include -void CalibrationHandlerBindings::bind(pybind11::module& m){ +void CalibrationHandlerBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - // bind pipeline - py::class_(m, "CalibrationHandler", DOC(dai, CalibrationHandler)) + // Type definitions + py::class_ calibrationHandler(m, "CalibrationHandler", DOC(dai, CalibrationHandler)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Bindings + calibrationHandler .def(py::init<>(), DOC(dai, CalibrationHandler, CalibrationHandler)) .def(py::init(), DOC(dai, CalibrationHandler, CalibrationHandler, 2)) .def(py::init(), DOC(dai, CalibrationHandler, CalibrationHandler, 3)) diff --git a/src/CalibrationHandlerBindings.hpp b/src/CalibrationHandlerBindings.hpp index 525dd391e..8a4fdc283 100644 --- a/src/CalibrationHandlerBindings.hpp +++ b/src/CalibrationHandlerBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct CalibrationHandlerBindings { - static void bind(pybind11::module& m); + static void 
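// pCallstack carries a stack of binder functions: each bind() declares its py::
// types first, then pops and calls the next binder, and only afterwards attaches
// the actual methods, presumably so every type is already registered before any
// method signature or docstring refers to it.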
bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/DataQueueBindings.cpp b/src/DataQueueBindings.cpp index 528cd494e..557ae573f 100644 --- a/src/DataQueueBindings.cpp +++ b/src/DataQueueBindings.cpp @@ -6,11 +6,30 @@ // depthai #include "depthai/device/DataQueue.hpp" -void DataQueueBindings::bind(pybind11::module& m){ - +void DataQueueBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; using namespace std::chrono; + + // Type definitions + py::class_> dataOutputQueue(m, "DataOutputQueue", DOC(dai, DataOutputQueue)); + py::class_> dataInputQueue(m, "DataInputQueue", DOC(dai, DataInputQueue)); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // To prevent blocking whole python interpreter, blocking functions like 'get' and 'send' // are pooled with a reasonable delay and check for python interrupt signal in between. @@ -29,7 +48,7 @@ void DataQueueBindings::bind(pybind11::module& m){ throw py::value_error("Callback must take either zero, one or two arguments"); } }; - py::class_>(m, "DataOutputQueue", DOC(dai, DataOutputQueue)) + dataOutputQueue .def("getName", &DataOutputQueue::getName, DOC(dai, DataOutputQueue, getName)) .def("isClosed", &DataOutputQueue::isClosed, DOC(dai, DataOutputQueue, isClosed)) .def("close", &DataOutputQueue::close, DOC(dai, DataOutputQueue, close)) @@ -93,7 +112,7 @@ void DataQueueBindings::bind(pybind11::module& m){ ; // Bind DataInputQueue - py::class_>(m, "DataInputQueue", DOC(dai, DataInputQueue)) + dataInputQueue .def("isClosed", &DataInputQueue::isClosed, DOC(dai, DataInputQueue, isClosed)) .def("close", &DataInputQueue::close, DOC(dai, DataInputQueue, close)) .def("getName", &DataInputQueue::getName, DOC(dai, DataInputQueue, getName)) diff --git a/src/DataQueueBindings.hpp b/src/DataQueueBindings.hpp index e1f1ad7e5..9b1586944 100644 --- a/src/DataQueueBindings.hpp +++ b/src/DataQueueBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct DataQueueBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/DatatypeBindings.cpp b/src/DatatypeBindings.cpp index a0ac63aa8..894ff8f9a 100644 --- a/src/DatatypeBindings.cpp +++ b/src/DatatypeBindings.cpp @@ -44,13 +44,86 @@ // #include "spdlog/spdlog.h" -void DatatypeBindings::bind(pybind11::module& m){ +void DatatypeBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - // Bind Raw datatypes - py::class_>(m, "RawBuffer", DOC(dai, RawBuffer)) + py::class_> rawBuffer(m, "RawBuffer", DOC(dai, RawBuffer)); + py::class_> rawImgFrame(m, "RawImgFrame", DOC(dai, RawImgFrame)); + py::enum_ rawImgFrameType(rawImgFrame, "Type"); + py::class_ rawImgFrameSpecs(rawImgFrame, "Specs", DOC(dai, RawImgFrame, Specs)); + py::class_> rawNnData(m, "RawNNData", DOC(dai, RawNNData)); + py::class_ tensorInfo(m, "TensorInfo", DOC(dai, TensorInfo)); + py::enum_tensorInfoDataType(tensorInfo, 
"DataType"); + py::enum_tensorInfoStorageOrder(tensorInfo, "StorageOrder"); + py::class_ imgDetection(m, "ImgDetection", DOC(dai, ImgDetection)); + py::class_ spatialImgDetection(m, "SpatialImgDetection", DOC(dai, SpatialImgDetection)); + py::class_> rawImgDetections(m, "RawImgDetections", DOC(dai, RawImgDetections)); + py::class_> rawSpatialImgDetections(m, "RawSpatialImgDetections", DOC(dai, RawSpatialImgDetections)); + py::class_> rawImageManipConfig(m, "RawImageManipConfig", DOC(dai, RawImageManipConfig)); + py::class_ rawImageManipConfigCropRect(rawImageManipConfig, "CropRect", DOC(dai, RawImageManipConfig, CropRect)); + py::class_ rawImageManipCropConfig(rawImageManipConfig, "CropConfig", DOC(dai, RawImageManipConfig, CropConfig)); + py::class_rawImageManipConfigResizeConfig(rawImageManipConfig, "ResizeConfig", DOC(dai, RawImageManipConfig, ResizeConfig)); + py::class_ rawImageManipConfigFormatConfig(rawImageManipConfig, "FormatConfig", DOC(dai, RawImageManipConfig, FormatConfig)); + py::class_> rawCameraControl(m, "RawCameraControl", DOC(dai, RawCameraControl)); + py::class_ tracklet(m, "Tracklet", DOC(dai, Tracklet)); + py::enum_ trackletTrackingStatus(tracklet, "TrackingStatus", DOC(dai, Tracklet, TrackingStatus)); + py::class_> rawTacklets(m, "RawTracklets", DOC(dai, RawTracklets)); + py::class_> imuReport(m, "IMUReport", DOC(dai, IMUReport)); + py::enum_ imuReportAccuracy(imuReport, "Accuracy"); + py::class_> imuReportAccelerometer(m, "IMUReportAccelerometer", DOC(dai, IMUReportAccelerometer)); + py::class_> imuReportGyroscope(m, "IMUReportGyroscope", DOC(dai, IMUReportGyroscope)); + py::class_> imuReportMagneticField(m, "IMUReportMagneticField", DOC(dai, IMUReportMagneticField)); + py::class_> imuReportRotationVectorWAcc(m, "IMUReportRotationVectorWAcc", DOC(dai, IMUReportRotationVectorWAcc)); + py::class_ imuPacket(m, "IMUPacket", DOC(dai, IMUPacket)); + py::class_> rawIMUPackets(m, "RawIMUData", DOC(dai, RawIMUData)); + py::enum_ rawCameraControlAutoFocusMode(rawCameraControl, "AutoFocusMode", DOC(dai, RawCameraControl, AutoFocusMode)); + py::enum_ rawCameraControlAutoWhiteBalanceMode(rawCameraControl, "AutoWhiteBalanceMode", DOC(dai, RawCameraControl, AutoWhiteBalanceMode)); + py::enum_ rawCameraControlSceneMode(rawCameraControl, "SceneMode", DOC(dai, RawCameraControl, SceneMode)); + py::enum_ rawCameraControlAntiBandingMode(rawCameraControl, "AntiBandingMode", DOC(dai, RawCameraControl, AntiBandingMode)); + py::enum_ rawCameraControlEffectMode(rawCameraControl, "EffectMode", DOC(dai, RawCameraControl, EffectMode)); + py::class_> rawSystemInformation(m, "RawSystemInformation", DOC(dai, RawSystemInformation)); + py::class_> adatatype(m, "ADatatype", DOC(dai, ADatatype)); + py::class_> buffer(m, "Buffer", DOC(dai, Buffer)); + py::class_> imgFrame(m, "ImgFrame", DOC(dai, ImgFrame)); + py::class_ rotatedRect(m, "RotatedRect", DOC(dai, RotatedRect)); + py::class_> nnData(m, "NNData", DOC(dai, NNData)); + py::class_> imgDetections(m, "ImgDetections", DOC(dai, ImgDetections)); + py::class_> spatialImgDetections(m, "SpatialImgDetections", DOC(dai, SpatialImgDetections)); + py::class_> imageManipConfig(m, "ImageManipConfig", DOC(dai, ImageManipConfig)); + py::class_> cameraControl(m, "CameraControl", DOC(dai, CameraControl)); + py::class_> systemInformation(m, "SystemInformation", DOC(dai, SystemInformation)); + py::class_ spatialLocations(m, "SpatialLocations", DOC(dai, SpatialLocations)); + py::class_ rect(m, "Rect", DOC(dai, Rect)); + py::class_ 
spatialLocationCalculatorConfigThresholds(m, "SpatialLocationCalculatorConfigThresholds", DOC(dai, SpatialLocationCalculatorConfigThresholds)); + py::class_ spatialLocationCalculatorConfigData(m, "SpatialLocationCalculatorConfigData", DOC(dai, SpatialLocationCalculatorConfigData)); + py::class_> spatialLocationCalculatorData(m, "SpatialLocationCalculatorData", DOC(dai, SpatialLocationCalculatorData)); + py::class_> spatialLocationCalculatorConfig(m, "SpatialLocationCalculatorConfig", DOC(dai, SpatialLocationCalculatorConfig)); + py::class_> tracklets(m, "Tracklets", DOC(dai, Tracklets)); + py::class_> imuData(m, "IMUData", DOC(dai, IMUData)); + py::class_> rawStereoDepthConfig(m, "RawStereoDepthConfig", DOC(dai, RawStereoDepthConfig)); + py::class_> stereoDepthConfig(m, "StereoDepthConfig", DOC(dai, StereoDepthConfig)); + py::class_ edgeDetectorConfigData(m, "EdgeDetectorConfigData", DOC(dai, EdgeDetectorConfigData)); + py::class_> rawEdgeDetectorConfig(m, "RawEdgeDetectorConfig", DOC(dai, RawEdgeDetectorConfig)); + py::class_> edgeDetectorConfig(m, "EdgeDetectorConfig", DOC(dai, EdgeDetectorConfig)); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + + rawBuffer .def(py::init<>()) .def_property("data", [](py::object &obj){ dai::RawBuffer &a = obj.cast(); @@ -62,8 +135,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; - // Bind RawImgFrame - py::class_> rawImgFrame(m, "RawImgFrame", DOC(dai, RawImgFrame)); + rawImgFrame .def(py::init<>()) .def_readwrite("fb", &RawImgFrame::fb) @@ -92,7 +164,8 @@ void DatatypeBindings::bind(pybind11::module& m){ ) ; - py::enum_(rawImgFrame, "Type") + + rawImgFrameType .value("YUV422i", RawImgFrame::Type::YUV422i) .value("YUV444p", RawImgFrame::Type::YUV444p) .value("YUV420p", RawImgFrame::Type::YUV420p) @@ -128,7 +201,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .value("NONE", RawImgFrame::Type::NONE) ; - py::class_(rawImgFrame, "Specs", DOC(dai, RawImgFrame, Specs)) + rawImgFrameSpecs .def(py::init<>()) .def_readwrite("type", &RawImgFrame::Specs::type) .def_readwrite("width", &RawImgFrame::Specs::width) @@ -141,15 +214,12 @@ void DatatypeBindings::bind(pybind11::module& m){ ; - // NNData - py::class_> rawNnData(m, "RawNNData", DOC(dai, RawNNData)); rawNnData .def(py::init<>()) .def_readwrite("tensors", &RawNNData::tensors) .def_readwrite("batchSize", &RawNNData::batchSize) ; - py::class_ tensorInfo(m, "TensorInfo", DOC(dai, TensorInfo)); tensorInfo .def(py::init<>()) .def_readwrite("order", &TensorInfo::order) @@ -161,7 +231,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("offset", &TensorInfo::offset) ; - py::enum_(tensorInfo, "DataType") + tensorInfoDataType .value("FP16", TensorInfo::DataType::FP16) .value("U8F", TensorInfo::DataType::U8F) .value("INT", TensorInfo::DataType::INT) @@ -169,7 +239,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .value("I8", TensorInfo::DataType::I8) ; - 
py::enum_(tensorInfo, "StorageOrder") + tensorInfoStorageOrder .value("NHWC", TensorInfo::StorageOrder::NHWC) .value("NHCW", TensorInfo::StorageOrder::NHCW) .value("NCHW", TensorInfo::StorageOrder::NCHW) @@ -186,7 +256,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .value("W", TensorInfo::StorageOrder::W) ; - py::class_(m, "ImgDetection", DOC(dai, ImgDetection)) + imgDetection .def(py::init<>()) .def_readwrite("label", &ImgDetection::label) .def_readwrite("confidence", &ImgDetection::confidence) @@ -196,25 +266,23 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("ymax", &ImgDetection::ymax) ; - py::class_(m, "SpatialImgDetection", DOC(dai, SpatialImgDetection)) + + spatialImgDetection .def(py::init<>()) .def_readwrite("spatialCoordinates", &SpatialImgDetection::spatialCoordinates) ; - py::class_> RawImgDetections(m, "RawImgDetections", DOC(dai, RawImgDetections)); - RawImgDetections + rawImgDetections .def(py::init<>()) .def_readwrite("detections", &RawImgDetections::detections) ; - py::class_> RawSpatialImgDetections(m, "RawSpatialImgDetections", DOC(dai, RawSpatialImgDetections)); - RawSpatialImgDetections + rawSpatialImgDetections .def(py::init<>()) .def_readwrite("detections", &RawSpatialImgDetections::detections) ; // Bind RawImageManipConfig - py::class_> rawImageManipConfig(m, "RawImageManipConfig", DOC(dai, RawImageManipConfig)); rawImageManipConfig .def(py::init<>()) .def_readwrite("enableFormat", &RawImageManipConfig::enableFormat) @@ -225,7 +293,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("formatConfig", &RawImageManipConfig::formatConfig) ; - py::class_(rawImageManipConfig, "CropRect", DOC(dai, RawImageManipConfig, CropRect)) + rawImageManipConfigCropRect .def(py::init<>()) .def_readwrite("xmin", &RawImageManipConfig::CropRect::xmin) .def_readwrite("ymin", &RawImageManipConfig::CropRect::ymin) @@ -233,7 +301,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("ymax", &RawImageManipConfig::CropRect::ymax) ; - py::class_(rawImageManipConfig, "CropConfig", DOC(dai, RawImageManipConfig, CropConfig)) + rawImageManipCropConfig .def(py::init<>()) .def_readwrite("cropRect", &RawImageManipConfig::CropConfig::cropRect) .def_readwrite("cropRotatedRect", &RawImageManipConfig::CropConfig::cropRotatedRect) @@ -244,7 +312,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("normalizedCoords", &RawImageManipConfig::CropConfig::normalizedCoords) ; - py::class_(rawImageManipConfig, "ResizeConfig", DOC(dai, RawImageManipConfig, ResizeConfig)) + rawImageManipConfigResizeConfig .def(py::init<>()) .def_readwrite("width", &RawImageManipConfig::ResizeConfig::width) .def_readwrite("height", &RawImageManipConfig::ResizeConfig::height) @@ -263,7 +331,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("keepAspectRatio", &RawImageManipConfig::ResizeConfig::keepAspectRatio) ; - py::class_(rawImageManipConfig, "FormatConfig", DOC(dai, RawImageManipConfig, FormatConfig)) + rawImageManipConfigFormatConfig .def(py::init<>()) .def_readwrite("type", &RawImageManipConfig::FormatConfig::type) .def_readwrite("flipHorizontal", &RawImageManipConfig::FormatConfig::flipHorizontal) @@ -271,7 +339,6 @@ void DatatypeBindings::bind(pybind11::module& m){ // Bind RawCameraControl - py::class_> rawCameraControl(m, "RawCameraControl", DOC(dai, RawCameraControl)); rawCameraControl .def(py::init<>()) .def_readwrite("cmdMask", &RawCameraControl::cmdMask) @@ -280,7 +347,6 @@ void DatatypeBindings::bind(pybind11::module& m){ 
// TODO add more raw types here, not directly used ; - py::class_ tracklet(m, "Tracklet", DOC(dai, Tracklet)); tracklet .def(py::init<>()) .def_readwrite("roi", &Tracklet::roi) @@ -291,7 +357,7 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("spatialCoordinates", &Tracklet::spatialCoordinates) ; - py::enum_(tracklet, "TrackingStatus", DOC(dai, Tracklet, TrackingStatus)) + trackletTrackingStatus .value("NEW", Tracklet::TrackingStatus::NEW) .value("TRACKED", Tracklet::TrackingStatus::TRACKED) .value("LOST", Tracklet::TrackingStatus::LOST) @@ -299,50 +365,49 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind RawTracklets - py::class_> rawTacklets(m, "RawTracklets", DOC(dai, RawTracklets)); rawTacklets .def(py::init<>()) .def_readwrite("tracklets", &RawTracklets::tracklets) ; - py::class_> imureport(m, "IMUReport", DOC(dai, IMUReport)); - imureport + imuReport .def(py::init<>()) .def_readwrite("sequence", &IMUReport::sequence) .def_readwrite("accuracy", &IMUReport::accuracy) .def_readwrite("timestamp", &IMUReport::timestamp) ; - py::enum_(imureport, "Accuracy") + + imuReportAccuracy .value("UNRELIABLE", IMUReport::Accuracy::UNRELIABLE) .value("LOW", IMUReport::Accuracy::LOW) .value("MEDIUM", IMUReport::Accuracy::MEDIUM) .value("HIGH", IMUReport::Accuracy::HIGH) ; - py::class_>(m, "IMUReportAccelerometer", DOC(dai, IMUReportAccelerometer)) + imuReportAccelerometer .def(py::init<>()) .def_readwrite("x", &IMUReportAccelerometer::x) .def_readwrite("y", &IMUReportAccelerometer::y) .def_readwrite("z", &IMUReportAccelerometer::z) ; - py::class_>(m, "IMUReportGyroscope", DOC(dai, IMUReportGyroscope)) + imuReportGyroscope .def(py::init<>()) .def_readwrite("x", &IMUReportGyroscope::x) .def_readwrite("y", &IMUReportGyroscope::y) .def_readwrite("z", &IMUReportGyroscope::z) ; - py::class_>(m, "IMUReportMagneticField", DOC(dai, IMUReportMagneticField)) + imuReportMagneticField .def(py::init<>()) .def_readwrite("x", &IMUReportMagneticField::x) .def_readwrite("y", &IMUReportMagneticField::y) .def_readwrite("z", &IMUReportMagneticField::z) ; - py::class_>(m, "IMUReportRotationVectorWAcc", DOC(dai, IMUReportRotationVectorWAcc)) + imuReportRotationVectorWAcc .def(py::init<>()) .def_readwrite("i", &IMUReportRotationVectorWAcc::i) .def_readwrite("j", &IMUReportRotationVectorWAcc::j) @@ -393,8 +458,7 @@ void DatatypeBindings::bind(pybind11::module& m){ #endif - py::class_ imuPackets(m, "IMUPacket", DOC(dai, IMUPacket)); - imuPackets + imuPacket .def(py::init<>()) .def_readwrite("acceleroMeter", &IMUPacket::acceleroMeter) .def_readwrite("gyroscope", &IMUPacket::gyroscope) @@ -418,7 +482,6 @@ void DatatypeBindings::bind(pybind11::module& m){ // Bind RawIMUData - py::class_> rawIMUPackets(m, "RawIMUData", DOC(dai, RawIMUData)); rawIMUPackets .def(py::init<>()) .def_readwrite("packets", &RawIMUData::packets) @@ -428,7 +491,7 @@ void DatatypeBindings::bind(pybind11::module& m){ // The enum fields will also be exposed in 'CameraControl', store them for later std::vector camCtrlAttr; camCtrlAttr.push_back("AutoFocusMode"); - py::enum_(rawCameraControl, "AutoFocusMode", DOC(dai, RawCameraControl, AutoFocusMode)) + rawCameraControlAutoFocusMode .value("OFF", RawCameraControl::AutoFocusMode::OFF) .value("AUTO", RawCameraControl::AutoFocusMode::AUTO) .value("MACRO", RawCameraControl::AutoFocusMode::MACRO) @@ -438,7 +501,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; camCtrlAttr.push_back("AutoWhiteBalanceMode"); - py::enum_(rawCameraControl, "AutoWhiteBalanceMode", DOC(dai, 
RawCameraControl, AutoWhiteBalanceMode)) + rawCameraControlAutoWhiteBalanceMode .value("OFF", RawCameraControl::AutoWhiteBalanceMode::OFF) .value("AUTO", RawCameraControl::AutoWhiteBalanceMode::AUTO) .value("INCANDESCENT", RawCameraControl::AutoWhiteBalanceMode::INCANDESCENT) @@ -451,7 +514,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; camCtrlAttr.push_back("SceneMode"); - py::enum_(rawCameraControl, "SceneMode", DOC(dai, RawCameraControl, SceneMode)) + rawCameraControlSceneMode .value("UNSUPPORTED", RawCameraControl::SceneMode::UNSUPPORTED) .value("FACE_PRIORITY", RawCameraControl::SceneMode::FACE_PRIORITY) .value("ACTION", RawCameraControl::SceneMode::ACTION) @@ -472,7 +535,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; camCtrlAttr.push_back("AntiBandingMode"); - py::enum_(rawCameraControl, "AntiBandingMode", DOC(dai, RawCameraControl, AntiBandingMode)) + rawCameraControlAntiBandingMode .value("OFF", RawCameraControl::AntiBandingMode::OFF) .value("MAINS_50_HZ", RawCameraControl::AntiBandingMode::MAINS_50_HZ) .value("MAINS_60_HZ", RawCameraControl::AntiBandingMode::MAINS_60_HZ) @@ -480,7 +543,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; camCtrlAttr.push_back("EffectMode"); - py::enum_(rawCameraControl, "EffectMode", DOC(dai, RawCameraControl, EffectMode)) + rawCameraControlEffectMode .value("OFF", RawCameraControl::EffectMode::OFF) .value("MONO", RawCameraControl::EffectMode::MONO) .value("NEGATIVE", RawCameraControl::EffectMode::NEGATIVE) @@ -493,7 +556,6 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind RawSystemInformation - py::class_> rawSystemInformation(m, "RawSystemInformation", DOC(dai, RawSystemInformation)); rawSystemInformation .def(py::init<>()) .def_readwrite("ddrMemoryUsage", &RawSystemInformation::ddrMemoryUsage) @@ -507,10 +569,11 @@ void DatatypeBindings::bind(pybind11::module& m){ // Bind non-raw 'helper' datatypes - py::class_>(m, "ADatatype", DOC(dai, ADatatype)) + adatatype .def("getRaw", &ADatatype::getRaw); - py::class_>(m, "Buffer", DOC(dai, Buffer)) + + buffer .def(py::init<>(), DOC(dai, Buffer, Buffer)) // obj is "Python" object, which we used then to bind the numpy arrays lifespan to @@ -527,7 +590,7 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind ImgFrame - py::class_>(m, "ImgFrame", DOC(dai, ImgFrame)) + imgFrame .def(py::init<>()) // getters .def("getTimestamp", &ImgFrame::getTimestamp, DOC(dai, ImgFrame, getTimestamp)) @@ -735,7 +798,7 @@ void DatatypeBindings::bind(pybind11::module& m){ m.attr("ImgFrame").attr("Type") = m.attr("RawImgFrame").attr("Type"); m.attr("ImgFrame").attr("Specs") = m.attr("RawImgFrame").attr("Specs"); - py::class_(m, "RotatedRect", DOC(dai, RotatedRect)) + rotatedRect .def(py::init<>()) .def_readwrite("center", &RotatedRect::center) .def_readwrite("size", &RotatedRect::size) @@ -743,7 +806,8 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind NNData - py::class_>(m, "NNData", DOC(dai, NNData)) + + nnData .def(py::init<>(), DOC(dai, NNData, NNData)) // setters .def("setLayer", [](NNData& obj, const std::string& name, py::array_t data){ @@ -767,19 +831,22 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind ImgDetections - py::class_>(m, "ImgDetections", DOC(dai, ImgDetections)) + + imgDetections .def(py::init<>(), DOC(dai, ImgDetections, ImgDetections)) .def_property("detections", [](ImgDetections& det) { return &det.detections; }, [](ImgDetections& det, std::vector val) { det.detections = val; }, DOC(dai, ImgDetections, detections)) ; // Bind 
SpatialImgDetections - py::class_>(m, "SpatialImgDetections", DOC(dai, SpatialImgDetections)) + + spatialImgDetections .def(py::init<>()) .def_property("detections", [](SpatialImgDetections& det) { return &det.detections; }, [](SpatialImgDetections& det, std::vector val) { det.detections = val; }) ; // Bind ImageManipConfig - py::class_>(m, "ImageManipConfig", DOC(dai, ImageManipConfig)) + + imageManipConfig .def(py::init<>()) // setters .def("setCropRect", &ImageManipConfig::setCropRect, py::arg("xmin"), py::arg("ymin"), py::arg("xmax"), py::arg("xmax"), DOC(dai, ImageManipConfig, setCropRect)) @@ -810,7 +877,8 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Bind CameraControl - py::class_>(m, "CameraControl", DOC(dai, CameraControl)) + + cameraControl .def(py::init<>(), DOC(dai, CameraControl, CameraControl)) // setters .def("setCaptureStill", &CameraControl::setCaptureStill, py::arg("capture"), DOC(dai, CameraControl, setCaptureStill)) @@ -845,7 +913,8 @@ void DatatypeBindings::bind(pybind11::module& m){ } // Bind SystemInformation - py::class_>(m, "SystemInformation", DOC(dai, SystemInformation)) + + systemInformation .def(py::init<>()) .def_property("ddrMemoryUsage", [](SystemInformation& i) { return &i.ddrMemoryUsage; }, [](SystemInformation& i, MemoryInfo val) { i.ddrMemoryUsage = val; } ) .def_property("cmxMemoryUsage", [](SystemInformation& i) { return &i.cmxMemoryUsage; }, [](SystemInformation& i, MemoryInfo val) { i.cmxMemoryUsage = val; } ) @@ -856,7 +925,8 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_property("chipTemperature", [](SystemInformation& i) { return &i.chipTemperature; }, [](SystemInformation& i, ChipTemperature val) { i.chipTemperature = val; } ) ; - py::class_ (m, "SpatialLocations", DOC(dai, SpatialLocations)) + + spatialLocations .def(py::init<>()) .def_readwrite("config", &SpatialLocations::config, DOC(dai, SpatialLocations, config)) .def_readwrite("depthAverage", &SpatialLocations::depthAverage, DOC(dai, SpatialLocations, depthAverage)) @@ -867,7 +937,8 @@ void DatatypeBindings::bind(pybind11::module& m){ ; - py::class_ (m, "Rect", DOC(dai, Rect)) + + rect .def(py::init<>()) .def(py::init()) .def(py::init()) @@ -888,26 +959,30 @@ void DatatypeBindings::bind(pybind11::module& m){ .def_readwrite("height", &Rect::height) ; - py::class_ (m, "SpatialLocationCalculatorConfigThresholds", DOC(dai, SpatialLocationCalculatorConfigThresholds)) + + spatialLocationCalculatorConfigThresholds .def(py::init<>()) .def_readwrite("lowerThreshold", &SpatialLocationCalculatorConfigThresholds::lowerThreshold) .def_readwrite("upperThreshold", &SpatialLocationCalculatorConfigThresholds::upperThreshold) ; - py::class_ (m, "SpatialLocationCalculatorConfigData", DOC(dai, SpatialLocationCalculatorConfigData)) + + spatialLocationCalculatorConfigData .def(py::init<>()) .def_readwrite("roi", &SpatialLocationCalculatorConfigData::roi) .def_readwrite("depthThresholds", &SpatialLocationCalculatorConfigData::depthThresholds) ; // Bind SpatialLocationCalculatorData - py::class_>(m, "SpatialLocationCalculatorData", DOC(dai, SpatialLocationCalculatorData)) + + spatialLocationCalculatorData .def(py::init<>()) .def("getSpatialLocations", &SpatialLocationCalculatorData::getSpatialLocations, DOC(dai, SpatialLocationCalculatorData, getSpatialLocations)) ; // SpatialLocationCalculatorConfig (after ConfigData) - py::class_>(m, "SpatialLocationCalculatorConfig", DOC(dai, SpatialLocationCalculatorConfig)) + + spatialLocationCalculatorConfig .def(py::init<>()) // setters 
.def("setROIs", &SpatialLocationCalculatorConfig::setROIs, py::arg("ROIs"), DOC(dai, SpatialLocationCalculatorConfig, setROIs)) @@ -916,27 +991,27 @@ void DatatypeBindings::bind(pybind11::module& m){ ; // Tracklets (after ConfigData) - py::class_>(m, "Tracklets", DOC(dai, Tracklets)) + + tracklets .def(py::init<>()) .def_property("tracklets", [](Tracklets& track) { return &track.tracklets; }, [](Tracklets& track, std::vector val) { track.tracklets = val; }, DOC(dai, Tracklets, tracklets)) ; - // IMUData (after ConfigData) - py::class_>(m, "IMUData", DOC(dai, IMUData)) + + imuData .def(py::init<>()) .def_property("packets", [](IMUData& imuDta) { return &imuDta.packets; }, [](IMUData& imuDta, std::vector val) { imuDta.packets = val; }, DOC(dai, IMUData, packets)) ; - // Bind RawStereoDepthConfig - py::class_> rawStereoDepthConfig(m, "RawStereoDepthConfig", DOC(dai, RawStereoDepthConfig)); + rawStereoDepthConfig .def(py::init<>()) .def_readwrite("config", &RawStereoDepthConfig::config) ; - // StereoDepthConfig (after ConfigData) - py::class_>(m, "StereoDepthConfig", DOC(dai, StereoDepthConfig)) + + stereoDepthConfig .def(py::init<>()) .def("setConfidenceThreshold", &StereoDepthConfig::setConfidenceThreshold, py::arg("confThr"), DOC(dai, StereoDepthConfig, setConfidenceThreshold)) .def("setMedianFilter", &StereoDepthConfig::setMedianFilter, py::arg("median"), DOC(dai, StereoDepthConfig, setMedianFilter)) @@ -948,22 +1023,20 @@ void DatatypeBindings::bind(pybind11::module& m){ .def("getLeftRightCheckThreshold", &StereoDepthConfig::getLeftRightCheckThreshold, DOC(dai, StereoDepthConfig, getLeftRightCheckThreshold)) ; - - py::class_ (m, "EdgeDetectorConfigData", DOC(dai, EdgeDetectorConfigData)) + edgeDetectorConfigData .def(py::init<>()) .def_readwrite("sobelFilterHorizontalKernel", &EdgeDetectorConfigData::sobelFilterHorizontalKernel, DOC(dai, EdgeDetectorConfigData, sobelFilterHorizontalKernel)) .def_readwrite("sobelFilterVerticalKernel", &EdgeDetectorConfigData::sobelFilterVerticalKernel, DOC(dai, EdgeDetectorConfigData, sobelFilterVerticalKernel)) ; - // Bind RawEdgeDetectorConfig - py::class_> rawEdgeDetectorConfig(m, "RawEdgeDetectorConfig", DOC(dai, RawEdgeDetectorConfig)); + rawEdgeDetectorConfig .def(py::init<>()) .def_readwrite("config", &RawEdgeDetectorConfig::config) ; - // EdgeDetectorConfig (after ConfigData) - py::class_>(m, "EdgeDetectorConfig", DOC(dai, EdgeDetectorConfig)) + + edgeDetectorConfig .def(py::init<>()) .def("setSobelFilterKernels", &EdgeDetectorConfig::setSobelFilterKernels, py::arg("horizontalKernel"), py::arg("verticalKernel"), DOC(dai, EdgeDetectorConfig, setSobelFilterKernels)) .def("getConfigData", &EdgeDetectorConfig::getConfigData, DOC(dai, EdgeDetectorConfig, getConfigData)) diff --git a/src/DatatypeBindings.hpp b/src/DatatypeBindings.hpp index b7192cb20..21ff435ee 100644 --- a/src/DatatypeBindings.hpp +++ b/src/DatatypeBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct DatatypeBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index dcf6e782b..3f242a698 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -112,13 +112,30 @@ std::vector deviceGetQueueEventsHelper(dai::Device& d, const std::v } -void DeviceBindings::bind(pybind11::module& m){ +void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; + // Type definitions + py::class_ device(m, "Device", DOC(dai, Device)); + + + 
/////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Bind Device, using DeviceWrapper to be able to destruct the object by calling close() - py::class_(m, "Device", DOC(dai, Device)) + device // Python only methods .def("__enter__", [](py::object obj){ return obj; }) .def("__exit__", [](Device& d, py::object type, py::object value, py::object traceback) { diff --git a/src/DeviceBindings.hpp b/src/DeviceBindings.hpp index a63215e27..a962249d3 100644 --- a/src/DeviceBindings.hpp +++ b/src/DeviceBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct DeviceBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/DeviceBootloaderBindings.cpp b/src/DeviceBootloaderBindings.cpp index 0fb57d70e..890364a30 100644 --- a/src/DeviceBootloaderBindings.cpp +++ b/src/DeviceBootloaderBindings.cpp @@ -3,14 +3,33 @@ // depthai #include "depthai/device/DeviceBootloader.hpp" -void DeviceBootloaderBindings::bind(pybind11::module& m){ +void DeviceBootloaderBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - // Bind DeviceBootloader + // Type definitions py::class_ deviceBootloader(m, "DeviceBootloader", DOC(dai, DeviceBootloader)); + py::class_ deviceBootloaderVersion(deviceBootloader, "Version", DOC(dai, DeviceBootloader, Version)); + py::enum_ deviceBootloaderType(deviceBootloader, "Type"); + py::enum_ deviceBootloaderMemory(deviceBootloader, "Memory"); + py::enum_ deviceBootloaderSection(deviceBootloader, "Section"); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// - py::class_(deviceBootloader, "Version", DOC(dai, DeviceBootloader, Version)) + + // Bind DeviceBootloader + deviceBootloaderVersion .def(py::init(), py::arg("v"), DOC(dai, DeviceBootloader, Version, Version)) .def(py::init(), py::arg("major"), py::arg("minor"), py::arg("patch"), DOC(dai, DeviceBootloader, Version, Version, 2)) .def("__str__", &DeviceBootloader::Version::toString) @@ -19,15 +38,15 @@ void DeviceBootloaderBindings::bind(pybind11::module& m){ .def("__gt__", &DeviceBootloader::Version::operator>) ; - py::enum_(deviceBootloader, "Type") + deviceBootloaderType .value("USB", DeviceBootloader::Type::USB) .value("NETWORK", DeviceBootloader::Type::NETWORK) ; - py::enum_(deviceBootloader, "Memory") + deviceBootloaderMemory .value("FLASH", 
DeviceBootloader::Memory::FLASH) .value("EMMC", DeviceBootloader::Memory::EMMC) ; - py::enum_(deviceBootloader, "Section") + deviceBootloaderSection .value("HEADER", DeviceBootloader::Section::HEADER) .value("BOOTLOADER", DeviceBootloader::Section::BOOTLOADER) .value("BOOTLOADER_CONFIG", DeviceBootloader::Section::BOOTLOADER_CONFIG) diff --git a/src/DeviceBootloaderBindings.hpp b/src/DeviceBootloaderBindings.hpp index 21883ea96..636a8571d 100644 --- a/src/DeviceBootloaderBindings.hpp +++ b/src/DeviceBootloaderBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct DeviceBootloaderBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/XLinkConnectionBindings.cpp b/src/XLinkConnectionBindings.cpp index d2f77f215..07b9d37cf 100644 --- a/src/XLinkConnectionBindings.cpp +++ b/src/XLinkConnectionBindings.cpp @@ -5,18 +5,41 @@ #include #include -void XLinkConnectionBindings::bind(pybind11::module& m){ +void XLinkConnectionBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - py::class_(m, "DeviceInfo", DOC(dai, DeviceInfo)) + + // Type definitions + py::class_ deviceInfo(m, "DeviceInfo", DOC(dai, DeviceInfo)); + py::class_ deviceDesc(m, "DeviceDesc"); + py::enum_ xLinkDeviceState(m, "XLinkDeviceState"); + py::enum_ xLinkProtocol(m, "XLinkProtocol"); + py::enum_ xLinkPlatform(m, "XLinkPlatform"); + py::class_> xLinkConnection(m, "XLinkConnection", DOC(dai, XLinkConnection)); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Bindings + deviceInfo .def(py::init<>()) .def_readwrite("desc", &DeviceInfo::desc) .def_readwrite("state", &DeviceInfo::state) .def("getMxId", &DeviceInfo::getMxId) ; - py::class_(m, "DeviceDesc") + deviceDesc .def(py::init<>()) .def_readwrite("protocol", &deviceDesc_t::protocol) .def_readwrite("platform", &deviceDesc_t::platform) @@ -26,7 +49,7 @@ void XLinkConnectionBindings::bind(pybind11::module& m){ ) ; - py::enum_(m, "XLinkDeviceState") + xLinkDeviceState .value("X_LINK_ANY_STATE", X_LINK_ANY_STATE) .value("X_LINK_BOOTED", X_LINK_BOOTED) .value("X_LINK_UNBOOTED", X_LINK_UNBOOTED) @@ -35,7 +58,7 @@ void XLinkConnectionBindings::bind(pybind11::module& m){ ; - py::enum_(m, "XLinkProtocol") + xLinkProtocol .value("X_LINK_USB_VSC", X_LINK_USB_VSC) .value("X_LINK_USB_CDC", X_LINK_USB_CDC) .value("X_LINK_PCIE", X_LINK_PCIE) @@ -46,14 +69,14 @@ void XLinkConnectionBindings::bind(pybind11::module& m){ .export_values() ; - py::enum_(m, "XLinkPlatform") + xLinkPlatform .value("X_LINK_ANY_PLATFORM", X_LINK_ANY_PLATFORM) .value("X_LINK_MYRIAD_2", X_LINK_MYRIAD_2) .value("X_LINK_MYRIAD_X", X_LINK_MYRIAD_X) .export_values() ; - py::class_(m, "XLinkConnection", DOC(dai, XLinkConnection)) + xLinkConnection .def(py::init>()) .def(py::init()) .def(py::init()) diff --git a/src/XLinkConnectionBindings.hpp b/src/XLinkConnectionBindings.hpp index 5c60527ab..e2a5288f2 100644 --- 
a/src/XLinkConnectionBindings.hpp +++ b/src/XLinkConnectionBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct XLinkConnectionBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/log/LogBindings.cpp b/src/log/LogBindings.cpp index 3d7449bac..496eaeaa7 100644 --- a/src/log/LogBindings.cpp +++ b/src/log/LogBindings.cpp @@ -3,12 +3,29 @@ // depthai #include "depthai-shared/log/LogLevel.hpp" -void LogBindings::bind(pybind11::module& m){ +void LogBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; // Bind LogLevel - py::enum_(m, "LogLevel") + py::enum_ logLevel(m, "LogLevel"); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + + logLevel .value("TRACE", LogLevel::TRACE) .value("DEBUG", LogLevel::DEBUG) .value("INFO", LogLevel::INFO) @@ -17,5 +34,5 @@ void LogBindings::bind(pybind11::module& m){ .value("CRITICAL", LogLevel::CRITICAL) .value("OFF", LogLevel::OFF) ; - + } \ No newline at end of file diff --git a/src/log/LogBindings.hpp b/src/log/LogBindings.hpp index d64ca2561..06c46a5cf 100644 --- a/src/log/LogBindings.hpp +++ b/src/log/LogBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct LogBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/openvino/OpenVINOBindings.cpp b/src/openvino/OpenVINOBindings.cpp index eca2a0b04..ea7a434d5 100644 --- a/src/openvino/OpenVINOBindings.cpp +++ b/src/openvino/OpenVINOBindings.cpp @@ -3,12 +3,31 @@ // depthai #include "depthai/openvino/OpenVINO.hpp" -void OpenVINOBindings::bind(pybind11::module& m){ +void OpenVINOBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - // Bind OpenVINO py::class_ openvino(m, "OpenVINO", DOC(dai, OpenVINO)); + py::enum_ openvinoVersion(openvino, "Version", DOC(dai, OpenVINO, Version)); + + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + + + // Bind OpenVINO openvino .def_static("getVersions", &OpenVINO::getVersions, DOC(dai, OpenVINO, getVersions)) .def_static("getVersionName", &OpenVINO::getVersionName, py::arg("version"), DOC(dai, OpenVINO, getVersionName)) @@ -24,7 +43,8 @@ void OpenVINOBindings::bind(pybind11::module& m){ // and that the values are available directly under 
OpenVINO.VERSION_2021_4, ... // they are exported // By default, pybind creates strong typed enums, eg: OpenVINO::Version::VERSION_2021_4 - py::enum_(openvino, "Version", DOC(dai, OpenVINO, Version)) + + openvinoVersion .value("VERSION_2020_3", OpenVINO::Version::VERSION_2020_3) .value("VERSION_2020_4", OpenVINO::Version::VERSION_2020_4) .value("VERSION_2021_1", OpenVINO::Version::VERSION_2021_1) diff --git a/src/openvino/OpenVINOBindings.hpp b/src/openvino/OpenVINOBindings.hpp index d1353eb71..61e5585a4 100644 --- a/src/openvino/OpenVINOBindings.hpp +++ b/src/openvino/OpenVINOBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct OpenVINOBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; \ No newline at end of file diff --git a/src/pipeline/AssetManagerBindings.cpp b/src/pipeline/AssetManagerBindings.cpp index 258db6fe3..72b3b29f9 100644 --- a/src/pipeline/AssetManagerBindings.cpp +++ b/src/pipeline/AssetManagerBindings.cpp @@ -3,12 +3,32 @@ // depthai #include "depthai/pipeline/AssetManager.hpp" -void AssetManagerBindings::bind(pybind11::module& m){ +void AssetManagerBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; + + // Type definitions + py::class_> asset(m, "Asset", DOC(dai, Asset)); + py::class_ assetManager(m, "AssetManager", DOC(dai, AssetManager)); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Bind Asset - py::class_>(m, "Asset", DOC(dai, Asset)) + asset .def(py::init<>()) .def(py::init()) .def_readonly("key", &Asset::key) @@ -23,9 +43,8 @@ void AssetManagerBindings::bind(pybind11::module& m){ .def_readwrite("alignment", &Asset::alignment) ; - // Bind AssetManager - py::class_(m, "AssetManager", DOC(dai, AssetManager)) + assetManager .def(py::init<>()) .def("addExisting", &AssetManager::addExisting, py::arg("assets"), DOC(dai, AssetManager, addExisting)) .def("set", static_cast (AssetManager::*)(Asset)>(&AssetManager::set), py::arg("asset"), DOC(dai, AssetManager, set)) diff --git a/src/pipeline/AssetManagerBindings.hpp b/src/pipeline/AssetManagerBindings.hpp index c27dee80c..c46eaf769 100644 --- a/src/pipeline/AssetManagerBindings.hpp +++ b/src/pipeline/AssetManagerBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct AssetManagerBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 5d8eeb73c..c77ef27d3 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -14,25 +14,57 @@ #include "depthai-shared/common/Size2f.hpp" #include "depthai-shared/common/UsbSpeed.hpp" -void CommonBindings::bind(pybind11::module& m){ +void CommonBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; - py::class_(m, "Timestamp", DOC(dai, Timestamp)) + py::class_ timestamp(m, 
"Timestamp", DOC(dai, Timestamp)); + py::class_ point2f(m, "Point2f", DOC(dai, Point2f)); + py::class_ point3f(m, "Point3f", DOC(dai, Point3f)); + py::class_ size2f(m, "Size2f", DOC(dai, Size2f)); + py::enum_ cameraBoardSocket(m, "CameraBoardSocket", DOC(dai, CameraBoardSocket)); + py::enum_ cameraImageOrientation(m, "CameraImageOrientation", DOC(dai, CameraImageOrientation)); + py::class_ memoryInfo(m, "MemoryInfo", DOC(dai, MemoryInfo)); + py::class_ chipTemperature(m, "ChipTemperature", DOC(dai, ChipTemperature)); + py::class_ cpuUsage(m, "CpuUsage", DOC(dai, CpuUsage)); + py::enum_ cameraModel(m, "CameraModel", DOC(dai, CameraModel)); + py::class_ stereoRectification(m, "StereoRectification", DOC(dai, StereoRectification)); + py::class_ extrinsics(m, "Extrinsics", DOC(dai, Extrinsics)); + py::class_ cameraInfo(m, "CameraInfo", DOC(dai, CameraInfo)); + py::class_ eepromData(m, "EepromData", DOC(dai, EepromData)); + py::enum_ usbSpeed(m, "UsbSpeed", DOC(dai, UsbSpeed)); + py::enum_ processorType(m, "ProcessorType"); + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + + timestamp .def(py::init<>()) .def_readwrite("sec", &Timestamp::sec) .def_readwrite("nsec", &Timestamp::nsec) .def("get", &Timestamp::get) ; - py::class_(m, "Point2f", DOC(dai, Point2f)) + point2f .def(py::init<>()) .def(py::init()) .def_readwrite("x", &Point2f::x) .def_readwrite("y", &Point2f::y) ; - py::class_(m, "Point3f", DOC(dai, Point3f)) + point3f .def(py::init<>()) .def(py::init()) .def_readwrite("x", &Point3f::x) @@ -40,7 +72,7 @@ void CommonBindings::bind(pybind11::module& m){ .def_readwrite("z", &Point3f::z) ; - py::class_(m, "Size2f", DOC(dai, Size2f)) + size2f .def(py::init<>()) .def(py::init()) .def_readwrite("width", &Size2f::width) @@ -48,7 +80,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // CameraBoardSocket enum bindings - py::enum_(m, "CameraBoardSocket", DOC(dai, CameraBoardSocket)) + cameraBoardSocket .value("AUTO", CameraBoardSocket::AUTO) .value("RGB", CameraBoardSocket::RGB) .value("LEFT", CameraBoardSocket::LEFT) @@ -56,7 +88,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // CameraImageOrientation enum bindings - py::enum_(m, "CameraImageOrientation", DOC(dai, CameraImageOrientation)) + cameraImageOrientation .value("AUTO", CameraImageOrientation::AUTO) .value("NORMAL", CameraImageOrientation::NORMAL) .value("HORIZONTAL_MIRROR", CameraImageOrientation::HORIZONTAL_MIRROR) @@ -65,7 +97,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // MemoryInfo - py::class_(m, "MemoryInfo", DOC(dai, MemoryInfo)) + memoryInfo .def(py::init<>()) .def_readwrite("remaining", &MemoryInfo::remaining) .def_readwrite("used", &MemoryInfo::used) @@ -73,7 +105,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // ChipTemperature - py::class_(m, "ChipTemperature", DOC(dai, ChipTemperature)) + chipTemperature .def(py::init<>()) .def_readwrite("css", &ChipTemperature::css) .def_readwrite("mss", 
&ChipTemperature::mss) @@ -83,13 +115,13 @@ void CommonBindings::bind(pybind11::module& m){ ; // CpuUsage - py::class_(m, "CpuUsage", DOC(dai, CpuUsage)) + cpuUsage .def(py::init<>()) .def_readwrite("average", &CpuUsage::average) .def_readwrite("msTime", &CpuUsage::msTime) ; // CameraModel enum bindings - py::enum_(m, "CameraModel", DOC(dai, CameraModel)) + cameraModel .value("Perspective", CameraModel::Perspective) .value("Fisheye", CameraModel::Fisheye) .value("Equirectangular", CameraModel::Equirectangular) @@ -97,7 +129,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // StereoRectification - py::class_ (m, "StereoRectification", DOC(dai, StereoRectification)) + stereoRectification .def(py::init<>()) .def_readwrite("rectifiedRotationLeft", &StereoRectification::rectifiedRotationLeft) .def_readwrite("rectifiedRotationRight", &StereoRectification::rectifiedRotationRight) @@ -106,7 +138,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // Extrinsics - py::class_ (m, "Extrinsics", DOC(dai, Extrinsics)) + extrinsics .def(py::init<>()) .def_readwrite("rotationMatrix", &Extrinsics::rotationMatrix) .def_readwrite("translation", &Extrinsics::translation) @@ -115,7 +147,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // CameraInfo - py::class_ (m, "CameraInfo", DOC(dai, CameraInfo)) + cameraInfo .def(py::init<>()) .def_readwrite("width", &CameraInfo::width) .def_readwrite("height", &CameraInfo::height) @@ -127,7 +159,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // EepromData - py::class_ (m, "EepromData", DOC(dai, EepromData)) + eepromData .def(py::init<>()) .def_readwrite("version", &EepromData::version) .def_readwrite("boardName", &EepromData::boardName) @@ -137,7 +169,7 @@ void CommonBindings::bind(pybind11::module& m){ .def_readwrite("imuExtrinsics", &EepromData::imuExtrinsics) ; // UsbSpeed - py::enum_(m, "UsbSpeed", DOC(dai, UsbSpeed)) + usbSpeed .value("UNKNOWN", UsbSpeed::UNKNOWN) .value("LOW", UsbSpeed::LOW) .value("FULL", UsbSpeed::FULL) @@ -147,7 +179,7 @@ void CommonBindings::bind(pybind11::module& m){ ; // ProcessorType - py::enum_(m, "ProcessorType") + processorType .value("LEON_CSS", ProcessorType::LEON_CSS) .value("LEON_MSS", ProcessorType::LEON_MSS) ; diff --git a/src/pipeline/CommonBindings.hpp b/src/pipeline/CommonBindings.hpp index a2dab9ace..333b6c112 100644 --- a/src/pipeline/CommonBindings.hpp +++ b/src/pipeline/CommonBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct CommonBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp index 6348f9c13..1d8e23603 100644 --- a/src/pipeline/NodeBindings.cpp +++ b/src/pipeline/NodeBindings.cpp @@ -138,26 +138,101 @@ py::class_ bindNodeMap(py::handle scope, const std::string &na } -void NodeBindings::bind(pybind11::module& m){ +void NodeBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; + //// Bindings for actual nodes + // Create "namespace" (python submodule) for nodes + using namespace dai::node; + // Move properties into nodes and nodes under 'node' submodule + daiNodeModule = m.def_submodule("node"); - //////////////////////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////////////////////// - // Node properties bindings first - so function params are resolved - 
//////////////////////////////////////////////////////////////////////////////////////// - //////////////////////////////////////////////////////////////////////////////////////// - py::class_ colorCameraProperties(m, "ColorCameraProperties", DOC(dai, ColorCameraProperties)); - - py::enum_(colorCameraProperties, "SensorResolution", DOC(dai, ColorCameraProperties, SensorResolution)) + py::enum_ colorCameraPropertiesSensorResolution(colorCameraProperties, "SensorResolution", DOC(dai, ColorCameraProperties, SensorResolution)); + py::enum_ colorCameraPropertiesColorOrder(colorCameraProperties, "ColorOrder", DOC(dai, ColorCameraProperties, ColorOrder)); + py::class_ monoCameraProperties(m, "MonoCameraProperties", DOC(dai, MonoCameraProperties)); + py::enum_ monoCameraPropertiesSensorResolution(monoCameraProperties, "SensorResolution", DOC(dai, MonoCameraProperties, SensorResolution)); + py::class_ stereoDepthProperties(m, "StereoDepthProperties", DOC(dai, StereoDepthProperties)); + py::enum_ medianFilter(m, "MedianFilter", DOC(dai, MedianFilter)); + py::class_ stereoDepthConfigData(m, "StereoDepthConfigData", DOC(dai, StereoDepthConfigData)); + py::class_ videoEncoderProperties(m, "VideoEncoderProperties", DOC(dai, VideoEncoderProperties)); + py::enum_ videoEncoderPropertiesProfile(videoEncoderProperties, "Profile", DOC(dai, VideoEncoderProperties, Profile)); + py::enum_ videoEncoderPropertiesProfileRateControlMode(videoEncoderProperties, "RateControlMode", DOC(dai, VideoEncoderProperties, RateControlMode)); + py::class_ systemLoggerProperties(m, "SystemLoggerProperties", DOC(dai, SystemLoggerProperties)); + py::class_> neuralNetworkProperties(m, "NeuralNetworkProperties", DOC(dai, NeuralNetworkProperties)); + py::class_> detectionNetworkProperties(m, "DetectionNetworkProperties", DOC(dai, DetectionNetworkProperties)); + py::class_> spatialDetectionNetworkProperties(m, "SpatialDetectionNetworkProperties", DOC(dai, SpatialDetectionNetworkProperties)); + py::class_ spatialLocationCalculatorProperties(m, "SpatialLocationCalculatorProperties", DOC(dai, SpatialLocationCalculatorProperties)); + py::enum_ trackerType(m, "TrackerType"); + py::enum_ trackerIdAssigmentPolicy(m, "TrackerIdAssigmentPolicy"); + py::class_> objectTrackerProperties(m, "ObjectTrackerProperties", DOC(dai, ObjectTrackerProperties)); + py::enum_ imuSensor(m, "IMUSensor", DOC(dai, IMUSensor)); + py::class_> imuSensorConfig(m, "IMUSensorConfig", DOC(dai, IMUSensorConfig)); + py::class_ imuProperties(m, "IMUProperties", DOC(dai, IMUProperties)); + py::class_ edgeDetectorProperties(m, "EdgeDetectorProperties", DOC(dai, EdgeDetectorProperties)); + py::class_ spiOutProperties(m, "SPIOutProperties", DOC(dai, SPIOutProperties)); + py::class_ spiInProperties(m, "SPIInProperties", DOC(dai, SPIInProperties)); + py::class_> pyNode(m, "Node", DOC(dai, Node)); + py::class_ pyInput(pyNode, "Input", DOC(dai, Node, Input)); + py::enum_ nodeInputType(pyInput, "Type"); + py::class_ pyOutput(pyNode, "Output", DOC(dai, Node, Output)); + py::enum_ nodeOutputType(pyOutput, "Type"); + // Node::Id bindings + py::class_(pyNode, "Id", "Node identificator. 
Unique for every node on a single Pipeline"); + // Node::Connection bindings + py::class_ nodeConnection(pyNode, "Connection", DOC(dai, Node, Connection)); + // Node::InputMap bindings + bindNodeMap(pyNode, "InputMap"); + // Node::OutputMap bindings + bindNodeMap(pyNode, "OutputMap"); + auto xlinkIn = ADD_NODE(XLinkIn); + auto xlinkOut = ADD_NODE(XLinkOut); + auto colorCamera = ADD_NODE(ColorCamera); + auto neuralNetwork = ADD_NODE(NeuralNetwork); + auto imageManip = ADD_NODE(ImageManip); + auto monoCamera = ADD_NODE(MonoCamera); + auto stereoDepth = ADD_NODE(StereoDepth); + auto videoEncoder = ADD_NODE(VideoEncoder); + auto spiOut = ADD_NODE(SPIOut); + auto spiIn = ADD_NODE(SPIIn); + auto detectionNetwork = ADD_NODE_DERIVED_ABSTRACT(DetectionNetwork, NeuralNetwork); + auto mobileNetDetectionNetwork = ADD_NODE_DERIVED(MobileNetDetectionNetwork, DetectionNetwork); + auto yoloDetectionNetwork = ADD_NODE_DERIVED(YoloDetectionNetwork, DetectionNetwork); + auto spatialDetectionNetwork = ADD_NODE_DERIVED_ABSTRACT(SpatialDetectionNetwork, DetectionNetwork); + auto mobileNetSpatialDetectionNetwork = ADD_NODE_DERIVED(MobileNetSpatialDetectionNetwork, SpatialDetectionNetwork); + auto yoloSpatialDetectionNetwork = ADD_NODE_DERIVED(YoloSpatialDetectionNetwork, SpatialDetectionNetwork); + auto spatialLocationCalculator = ADD_NODE(SpatialLocationCalculator); + auto systemLogger = ADD_NODE(SystemLogger); + auto objectTracker = ADD_NODE(ObjectTracker); + auto script = ADD_NODE(Script); + auto imu = ADD_NODE(IMU); + auto edgeDetector = ADD_NODE(EdgeDetector); + + + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + + + colorCameraPropertiesSensorResolution .value("THE_1080_P", ColorCameraProperties::SensorResolution::THE_1080_P) .value("THE_4_K", ColorCameraProperties::SensorResolution::THE_4_K) .value("THE_12_MP", ColorCameraProperties::SensorResolution::THE_12_MP) ; - py::enum_(colorCameraProperties, "ColorOrder", DOC(dai, ColorCameraProperties, ColorOrder)) + colorCameraPropertiesColorOrder .value("BGR", ColorCameraProperties::ColorOrder::BGR) .value("RGB", ColorCameraProperties::ColorOrder::RGB) ; @@ -182,9 +257,8 @@ void NodeBindings::bind(pybind11::module& m){ // MonoCamera props - py::class_ monoCameraProperties(m, "MonoCameraProperties", DOC(dai, MonoCameraProperties)); - py::enum_(monoCameraProperties, "SensorResolution", DOC(dai, MonoCameraProperties, SensorResolution)) + monoCameraPropertiesSensorResolution .value("THE_720_P", MonoCameraProperties::SensorResolution::THE_720_P) .value("THE_800_P", MonoCameraProperties::SensorResolution::THE_800_P) .value("THE_400_P", MonoCameraProperties::SensorResolution::THE_400_P) @@ -199,10 +273,8 @@ void NodeBindings::bind(pybind11::module& m){ // StereoDepth props - py::class_ stereoDepthProperties(m, "StereoDepthProperties", DOC(dai, StereoDepthProperties)); // MedianFilter - py::enum_ medianFilter(m, "MedianFilter", DOC(dai, MedianFilter)); medianFilter 
.value("MEDIAN_OFF", MedianFilter::MEDIAN_OFF) .value("KERNEL_3x3", MedianFilter::KERNEL_3x3) @@ -230,7 +302,6 @@ void NodeBindings::bind(pybind11::module& m){ ; m.attr("StereoDepthProperties").attr("MedianFilter") = medianFilter; - py::class_ stereoDepthConfigData(m, "StereoDepthConfigData", DOC(dai, StereoDepthConfigData)); stereoDepthConfigData .def(py::init<>()) .def_readwrite("median", &StereoDepthConfigData::median, DOC(dai, StereoDepthConfigData, median)) @@ -242,9 +313,8 @@ void NodeBindings::bind(pybind11::module& m){ // VideoEncoder props - py::class_ videoEncoderProperties(m, "VideoEncoderProperties", DOC(dai, VideoEncoderProperties)); - py::enum_(videoEncoderProperties, "Profile", DOC(dai, VideoEncoderProperties, Profile)) + videoEncoderPropertiesProfile .value("H264_BASELINE", VideoEncoderProperties::Profile::H264_BASELINE) .value("H264_HIGH", VideoEncoderProperties::Profile::H264_HIGH) .value("H264_MAIN", VideoEncoderProperties::Profile::H264_MAIN) @@ -252,7 +322,7 @@ void NodeBindings::bind(pybind11::module& m){ .value("MJPEG", VideoEncoderProperties::Profile::MJPEG) ; - py::enum_(videoEncoderProperties, "RateControlMode", DOC(dai, VideoEncoderProperties, RateControlMode)) + videoEncoderPropertiesProfileRateControlMode .value("CBR", VideoEncoderProperties::RateControlMode::CBR) .value("VBR", VideoEncoderProperties::RateControlMode::VBR) ; @@ -272,11 +342,10 @@ void NodeBindings::bind(pybind11::module& m){ // System logger - py::class_(m, "SystemLoggerProperties", DOC(dai, SystemLoggerProperties)) + systemLoggerProperties .def_readwrite("rateHz", &SystemLoggerProperties::rateHz) ; - py::class_> neuralNetworkProperties(m, "NeuralNetworkProperties", DOC(dai, NeuralNetworkProperties)); neuralNetworkProperties .def_readwrite("blobSize", &NeuralNetworkProperties::blobSize) .def_readwrite("blobUri", &NeuralNetworkProperties::blobUri) @@ -286,7 +355,6 @@ void NodeBindings::bind(pybind11::module& m){ ; - py::class_> detectionNetworkProperties(m, "DetectionNetworkProperties", DOC(dai, DetectionNetworkProperties)); detectionNetworkProperties .def_readwrite("nnFamily", &DetectionNetworkProperties::nnFamily) .def_readwrite("confidenceThreshold", &DetectionNetworkProperties::confidenceThreshold) @@ -298,30 +366,27 @@ void NodeBindings::bind(pybind11::module& m){ ; - py::class_> spatialDetectionNetworkProperties(m, "SpatialDetectionNetworkProperties", DOC(dai, SpatialDetectionNetworkProperties)); spatialDetectionNetworkProperties .def_readwrite("detectedBBScaleFactor", &SpatialDetectionNetworkProperties::detectedBBScaleFactor) .def_readwrite("depthThresholds", &SpatialDetectionNetworkProperties::depthThresholds) ; - py::class_ spatialLocationCalculatorProperties(m, "SpatialLocationCalculatorProperties", DOC(dai, SpatialLocationCalculatorProperties)); spatialLocationCalculatorProperties .def_readwrite("roiConfig", &SpatialLocationCalculatorProperties::roiConfig) .def_readwrite("inputConfigSync", &SpatialLocationCalculatorProperties::inputConfigSync) ; - py::enum_(m, "TrackerType") + trackerType .value("ZERO_TERM_IMAGELESS", TrackerType::ZERO_TERM_IMAGELESS) .value("ZERO_TERM_COLOR_HISTOGRAM", TrackerType::ZERO_TERM_COLOR_HISTOGRAM) ; - py::enum_(m, "TrackerIdAssigmentPolicy") + trackerIdAssigmentPolicy .value("UNIQUE_ID", TrackerIdAssigmentPolicy::UNIQUE_ID) .value("SMALLEST_ID", TrackerIdAssigmentPolicy::SMALLEST_ID) ; - py::class_> objectTrackerProperties(m, "ObjectTrackerProperties", DOC(dai, ObjectTrackerProperties)); objectTrackerProperties .def_readwrite("trackerThreshold", 
&ObjectTrackerProperties::trackerThreshold) .def_readwrite("maxObjectsToTrack", &ObjectTrackerProperties::maxObjectsToTrack) @@ -331,7 +396,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // IMU node properties - py::enum_(m, "IMUSensor", DOC(dai, IMUSensor)) + imuSensor .value("ACCELEROMETER_RAW", IMUSensor::ACCELEROMETER_RAW, DOC(dai, IMUSensor, ACCELEROMETER_RAW)) .value("ACCELEROMETER", IMUSensor::ACCELEROMETER, DOC(dai, IMUSensor, ACCELEROMETER)) .value("LINEAR_ACCELERATION", IMUSensor::LINEAR_ACCELERATION, DOC(dai, IMUSensor, LINEAR_ACCELERATION)) @@ -350,7 +415,7 @@ void NodeBindings::bind(pybind11::module& m){ // .value("GYRO_INTEGRATED_ROTATION_VECTOR", IMUSensor::GYRO_INTEGRATED_ROTATION_VECTOR) ; - py::class_>(m, "IMUSensorConfig", DOC(dai, IMUSensorConfig)) + imuSensorConfig .def(py::init<>()) .def_readwrite("sensitivityEnabled", &IMUSensorConfig::sensitivityEnabled) .def_readwrite("sensitivityRelative", &IMUSensorConfig::sensitivityRelative) @@ -359,7 +424,6 @@ void NodeBindings::bind(pybind11::module& m){ .def_readwrite("sensorId", &IMUSensorConfig::sensorId) ; - py::class_ imuProperties(m, "IMUProperties", DOC(dai, IMUProperties)); imuProperties .def_readwrite("imuSensors", &IMUProperties::imuSensors, DOC(dai, IMUProperties, imuSensors)) .def_readwrite("batchReportThreshold", &IMUProperties::batchReportThreshold, DOC(dai, IMUProperties, batchReportThreshold)) @@ -367,7 +431,6 @@ void NodeBindings::bind(pybind11::module& m){ ; // EdgeDetector node properties - py::class_ edgeDetectorProperties(m, "EdgeDetectorProperties", DOC(dai, EdgeDetectorProperties)); edgeDetectorProperties .def_readwrite("initialConfig", &EdgeDetectorProperties::initialConfig, DOC(dai, EdgeDetectorProperties, initialConfig)) .def_readwrite("inputConfigSync", &EdgeDetectorProperties::inputConfigSync, DOC(dai, EdgeDetectorProperties, inputConfigSync)) @@ -376,14 +439,12 @@ void NodeBindings::bind(pybind11::module& m){ ; // SPIOut properties - py::class_ spiOutProperties(m, "SPIOutProperties", DOC(dai, SPIOutProperties)); spiOutProperties .def_readwrite("streamName", &SPIOutProperties::streamName) .def_readwrite("busId", &SPIOutProperties::busId) ; // SPIIn properties - py::class_ spiInProperties(m, "SPIInProperties", DOC(dai, SPIInProperties)); spiInProperties .def_readwrite("streamName", &SPIInProperties::streamName) .def_readwrite("busId", &SPIInProperties::busId) @@ -399,11 +460,9 @@ void NodeBindings::bind(pybind11::module& m){ //////////////////////////////////////////////////////////////////////////////////////// // Base 'Node' class binding - py::class_> pyNode(m, "Node", DOC(dai, Node)); // Node::Input bindings - py::class_ pyInput(pyNode, "Input", DOC(dai, Node, Input)); - py::enum_(pyInput, "Type") + nodeInputType .value("SReceiver", Node::Input::Type::SReceiver) .value("MReceiver", Node::Input::Type::MReceiver) ; @@ -417,8 +476,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // Node::Output bindings - py::class_ pyOutput(pyNode, "Output", DOC(dai, Node, Output)); - py::enum_(pyOutput, "Type") + nodeOutputType .value("MSender", Node::Output::Type::MSender) .value("SSender", Node::Output::Type::SSender) ; @@ -428,11 +486,9 @@ void NodeBindings::bind(pybind11::module& m){ .def("unlink", &Node::Output::unlink, py::arg("in"), DOC(dai, Node, Output, unlink)) .def("getConnections", &Node::Output::getConnections, DOC(dai, Node, Output, getConnections)) ; - // Node::Id bindings - py::class_(pyNode, "Id", "Node identificator. 
Unique for every node on a single Pipeline"); - // Node::Connection bindings - py::class_(pyNode, "Connection", DOC(dai, Node, Connection)) + + nodeConnection .def_property("outputId", [](Node::Connection& conn) { return conn.outputId; }, [](Node::Connection& conn, Node::Id id) {conn.outputId = id; }, DOC(dai, Node, Connection, outputId)) .def_property("outputName", [](Node::Connection& conn) { return conn.outputName; }, [](Node::Connection& conn, std::string name) {conn.outputName = name; }, DOC(dai, Node, Connection, outputName)) .def_property("inputId", [](Node::Connection& conn) { return conn.inputId; }, [](Node::Connection& conn, Node::Id id) {conn.inputId = id; }, DOC(dai, Node, Connection, inputId)) @@ -464,21 +520,9 @@ void NodeBindings::bind(pybind11::module& m){ // .def_readwrite("inputName", &dai::Node::Connection::inputName) // ; - // Node::InputMap bindings - bindNodeMap(pyNode, "InputMap"); - // Node::OutputMap bindings - bindNodeMap(pyNode, "OutputMap"); - - - //// Bindings for actual nodes - // Create "namespace" (python submodule) for nodes - using namespace dai::node; - //daiNodeModule = m; - // Move properties into nodes and nodes under 'node' submodule - daiNodeModule = m.def_submodule("node"); // XLinkIn node - ADD_NODE(XLinkIn) + xlinkIn .def_readonly("out", &XLinkIn::out, DOC(dai, node, XLinkIn, out)) .def("setStreamName", &XLinkIn::setStreamName, py::arg("streamName"), DOC(dai, node, XLinkIn, setStreamName)) .def("setMaxDataSize", &XLinkIn::setMaxDataSize, py::arg("maxDataSize"), DOC(dai, node, XLinkIn, setMaxDataSize)) @@ -489,7 +533,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // XLinkOut node - ADD_NODE(XLinkOut) + xlinkOut .def_readonly("input", &XLinkOut::input, DOC(dai, node, XLinkOut, input)) .def("setStreamName", &XLinkOut::setStreamName, py::arg("streamName"), DOC(dai, node, XLinkOut, setStreamName)) .def("setFpsLimit", &XLinkOut::setFpsLimit, py::arg("fpsLimit"), DOC(dai, node, XLinkOut, setFpsLimit)) @@ -500,7 +544,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // ColorCamera node - ADD_NODE(ColorCamera) + colorCamera .def_readonly("inputConfig", &ColorCamera::inputConfig, DOC(dai, node, ColorCamera, inputConfig)) .def_readonly("inputControl", &ColorCamera::inputControl, DOC(dai, node, ColorCamera, inputControl)) .def_readonly("initialControl", &ColorCamera::initialControl, DOC(dai, node, ColorCamera, initialControl)) @@ -580,7 +624,7 @@ void NodeBindings::bind(pybind11::module& m){ // NeuralNetwork node - ADD_NODE(NeuralNetwork) + neuralNetwork .def_readonly("input", &NeuralNetwork::input, DOC(dai, node, NeuralNetwork, input)) .def_readonly("out", &NeuralNetwork::out, DOC(dai, node, NeuralNetwork, out)) .def_readonly("passthrough", &NeuralNetwork::passthrough, DOC(dai, node, NeuralNetwork, passthrough)) @@ -595,7 +639,7 @@ void NodeBindings::bind(pybind11::module& m){ // ImageManip node - ADD_NODE(ImageManip) + imageManip .def_readonly("inputConfig", &ImageManip::inputConfig, DOC(dai, node, ImageManip, inputConfig)) .def_readonly("inputImage", &ImageManip::inputImage, DOC(dai, node, ImageManip, inputImage)) .def_readonly("out", &ImageManip::out, DOC(dai, node, ImageManip, out)) @@ -663,7 +707,7 @@ void NodeBindings::bind(pybind11::module& m){ ; // MonoCamera node - ADD_NODE(MonoCamera) + monoCamera .def_readonly("inputControl", &MonoCamera::inputControl, DOC(dai, node, MonoCamera, inputControl)) .def_readonly("out", &MonoCamera::out, DOC(dai, node, MonoCamera, out)) .def_readonly("raw", &MonoCamera::raw, DOC(dai, node, MonoCamera, raw)) @@ 
-701,7 +745,7 @@ void NodeBindings::bind(pybind11::module& m){ // StereoDepth node - ADD_NODE(StereoDepth) + stereoDepth .def_readonly("initialConfig", &StereoDepth::initialConfig, DOC(dai, node, StereoDepth, initialConfig)) .def_readonly("inputConfig", &StereoDepth::inputConfig, DOC(dai, node, StereoDepth, inputConfig)) .def_readonly("left", &StereoDepth::left, DOC(dai, node, StereoDepth, left)) @@ -785,7 +829,7 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("StereoDepth").attr("Properties") = stereoDepthProperties; // VideoEncoder node - ADD_NODE(VideoEncoder) + videoEncoder .def_readonly("input", &VideoEncoder::input, DOC(dai, node, VideoEncoder, input), DOC(dai, node, VideoEncoder, input)) .def_readonly("bitstream", &VideoEncoder::bitstream, DOC(dai, node, VideoEncoder, bitstream), DOC(dai, node, VideoEncoder, bitstream)) .def("setDefaultProfilePreset", static_cast(&VideoEncoder::setDefaultProfilePreset), py::arg("width"), py::arg("height"), py::arg("fps"), py::arg("profile"), DOC(dai, node, VideoEncoder, setDefaultProfilePreset)) @@ -821,7 +865,7 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("VideoEncoder").attr("Properties") = videoEncoderProperties; - ADD_NODE(SPIOut) + spiOut .def_readonly("input", &SPIOut::input, DOC(dai, node, SPIOut, input)) .def("setStreamName", &SPIOut::setStreamName, py::arg("name"), DOC(dai, node, SPIOut, setStreamName)) .def("setBusId", &SPIOut::setBusId, py::arg("id"), DOC(dai, node, SPIOut, setBusId)) @@ -831,7 +875,7 @@ void NodeBindings::bind(pybind11::module& m){ // SPIIn node - ADD_NODE(SPIIn) + spiIn .def_readonly("out", &SPIIn::out, DOC(dai, node, SPIIn, out)) .def("setStreamName", &SPIIn::setStreamName, py::arg("name"), DOC(dai, node, SPIIn, setStreamName)) .def("setBusId", &SPIIn::setBusId, py::arg("id"), DOC(dai, node, SPIIn, setBusId)) @@ -846,7 +890,7 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("SPIIn").attr("Properties") = spiInProperties; // DetectionNetwork node - ADD_NODE_DERIVED_ABSTRACT(DetectionNetwork, NeuralNetwork) + detectionNetwork .def_readonly("input", &DetectionNetwork::input, DOC(dai, node, DetectionNetwork, input)) .def_readonly("out", &DetectionNetwork::out, DOC(dai, node, DetectionNetwork, out)) .def_readonly("passthrough", &DetectionNetwork::passthrough, DOC(dai, node, DetectionNetwork, passthrough)) @@ -856,12 +900,9 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("DetectionNetwork").attr("Properties") = detectionNetworkProperties; - // MobileNetDetectionNetwork node - ADD_NODE_DERIVED(MobileNetDetectionNetwork, DetectionNetwork) - ; // YoloDetectionNetwork node - ADD_NODE_DERIVED(YoloDetectionNetwork, DetectionNetwork) + yoloDetectionNetwork .def("setNumClasses", &YoloDetectionNetwork::setNumClasses, py::arg("numClasses"), DOC(dai, node, YoloDetectionNetwork, setNumClasses)) .def("setCoordinateSize", &YoloDetectionNetwork::setCoordinateSize, py::arg("coordinates"), DOC(dai, node, YoloDetectionNetwork, setCoordinateSize)) .def("setAnchors", &YoloDetectionNetwork::setAnchors, py::arg("anchors"), DOC(dai, node, YoloDetectionNetwork, setAnchors)) @@ -869,7 +910,7 @@ void NodeBindings::bind(pybind11::module& m){ .def("setIouThreshold", &YoloDetectionNetwork::setIouThreshold, py::arg("thresh"), DOC(dai, node, YoloDetectionNetwork, setIouThreshold)) ; - ADD_NODE_DERIVED_ABSTRACT(SpatialDetectionNetwork, DetectionNetwork) + spatialDetectionNetwork .def_readonly("input", &SpatialDetectionNetwork::input, DOC(dai, node, SpatialDetectionNetwork, 
input)) .def_readonly("inputDepth", &SpatialDetectionNetwork::inputDepth, DOC(dai, node, SpatialDetectionNetwork, inputDepth)) .def_readonly("out", &SpatialDetectionNetwork::out, DOC(dai, node, SpatialDetectionNetwork, out)) @@ -885,11 +926,9 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("SpatialDetectionNetwork").attr("Properties") = spatialDetectionNetworkProperties; // MobileNetSpatialDetectionNetwork - ADD_NODE_DERIVED(MobileNetSpatialDetectionNetwork, SpatialDetectionNetwork) - ; // YoloSpatialDetectionNetwork node - ADD_NODE_DERIVED(YoloSpatialDetectionNetwork, SpatialDetectionNetwork) + yoloSpatialDetectionNetwork .def("setNumClasses", &YoloSpatialDetectionNetwork::setNumClasses, py::arg("numClasses"), DOC(dai, node, YoloSpatialDetectionNetwork, setNumClasses)) .def("setCoordinateSize", &YoloSpatialDetectionNetwork::setCoordinateSize, py::arg("coordinates"), DOC(dai, node, YoloSpatialDetectionNetwork, setCoordinateSize)) .def("setAnchors", &YoloSpatialDetectionNetwork::setAnchors, py::arg("anchors"), DOC(dai, node, YoloSpatialDetectionNetwork, setAnchors)) @@ -898,7 +937,8 @@ void NodeBindings::bind(pybind11::module& m){ ; // SpatialLocationCalculator node - ADD_NODE(SpatialLocationCalculator) + + spatialLocationCalculator .def_readonly("inputConfig", &SpatialLocationCalculator::inputConfig, DOC(dai, node, SpatialLocationCalculator, inputConfig)) .def_readonly("inputDepth", &SpatialLocationCalculator::inputDepth, DOC(dai, node, SpatialLocationCalculator, inputDepth)) .def_readonly("out", &SpatialLocationCalculator::out, DOC(dai, node, SpatialLocationCalculator, out)) @@ -910,13 +950,13 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("SpatialLocationCalculator").attr("Properties") = spatialLocationCalculatorProperties; // SystemLogger node - ADD_NODE(SystemLogger) + systemLogger .def_readonly("out", &SystemLogger::out, DOC(dai, node, SystemLogger, out)) .def("setRate", &SystemLogger::setRate, py::arg("hz"), DOC(dai, node, SystemLogger, setRate)) ; // NeuralNetwork node - ADD_NODE(ObjectTracker) + objectTracker .def_readonly("inputTrackerFrame", &ObjectTracker::inputTrackerFrame, DOC(dai, node, ObjectTracker, inputTrackerFrame)) .def_readonly("inputDetectionFrame", &ObjectTracker::inputDetectionFrame, DOC(dai, node, ObjectTracker, inputDetectionFrame)) .def_readonly("inputDetections", &ObjectTracker::inputDetections, DOC(dai, node, ObjectTracker, inputDetections)) @@ -934,7 +974,7 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("ObjectTracker").attr("Properties") = objectTrackerProperties; // Script node - ADD_NODE(Script) + script .def_readonly("inputs", &Script::inputs) .def_readonly("outputs", &Script::outputs) .def("setScriptPath", &Script::setScriptPath, DOC(dai, node, Script, setScriptPath)) @@ -948,7 +988,7 @@ void NodeBindings::bind(pybind11::module& m){ // IMU node - ADD_NODE(IMU) + imu .def_readonly("out", &IMU::out, DOC(dai, node, IMU, out)) .def("enableIMUSensor", static_cast(&IMU::enableIMUSensor), py::arg("sensorConfig"), DOC(dai, node, IMU, enableIMUSensor)) .def("enableIMUSensor", static_cast& imuSensors)>(&IMU::enableIMUSensor), py::arg("sensorConfigs"), DOC(dai, node, IMU, enableIMUSensor, 2)) @@ -962,7 +1002,8 @@ void NodeBindings::bind(pybind11::module& m){ daiNodeModule.attr("IMU").attr("Properties") = imuProperties; // EdgeDetector node - ADD_NODE(EdgeDetector) + + edgeDetector .def_readonly("initialConfig", &EdgeDetector::initialConfig, DOC(dai, node, EdgeDetector, initialConfig)) 
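The Script node bound just above only exposes setScriptPath together with the dynamic inputs/outputs maps, so a short usage sketch may help. This is a minimal illustration, not code from this patch: the script path, the stream name and the 'out' output name are assumptions chosen for the example.

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()

    # Script node as bound above: a script file plus dynamic IO maps
    script = pipeline.create(dai.node.Script)
    script.setScriptPath('control.py')         # illustrative path, not shipped with this patch

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName('from_script')          # illustrative stream name
    # 'out' stands for whatever output name the on-device script writes to (assumed)
    script.outputs['out'].link(xout.input)
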
.def_readonly("inputConfig", &EdgeDetector::inputConfig, DOC(dai, node, EdgeDetector, inputConfig)) .def_readonly("inputImage", &EdgeDetector::inputImage, DOC(dai, node, EdgeDetector, inputImage)) diff --git a/src/pipeline/NodeBindings.hpp b/src/pipeline/NodeBindings.hpp index 76cb1173d..bd3b1994b 100644 --- a/src/pipeline/NodeBindings.hpp +++ b/src/pipeline/NodeBindings.hpp @@ -7,6 +7,6 @@ #include "depthai/pipeline/Node.hpp" struct NodeBindings : public dai::Node { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); static std::vector(dai::Pipeline&, py::object class_)>>> getNodeCreateMap(); }; diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 61c7c3a62..4583bf9ab 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -42,13 +42,29 @@ std::shared_ptr createNode(dai::Pipeline& p, py::object class_){ return nullptr; } -void PipelineBindings::bind(pybind11::module& m){ - +void PipelineBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; + // Type definitions + py::class_ globalProperties(m, "GlobalProperties", DOC(dai, GlobalProperties)); + py::class_ pipeline(m, "Pipeline", DOC(dai, Pipeline, 2)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Bind global properties - py::class_(m, "GlobalProperties", DOC(dai, GlobalProperties)) + globalProperties .def_readwrite("leonOsFrequencyHz", &GlobalProperties::leonCssFrequencyHz) .def_readwrite("leonRtFrequencyHz", &GlobalProperties::leonMssFrequencyHz) .def_readwrite("pipelineName", &GlobalProperties::pipelineName) @@ -58,7 +74,7 @@ void PipelineBindings::bind(pybind11::module& m){ ; // bind pipeline - py::class_(m, "Pipeline", DOC(dai, Pipeline, 2)) + pipeline .def(py::init<>(), DOC(dai, Pipeline, Pipeline)) //.def(py::init()) .def("getGlobalProperties", &Pipeline::getGlobalProperties, DOC(dai, Pipeline, getGlobalProperties)) @@ -111,4 +127,5 @@ void PipelineBindings::bind(pybind11::module& m){ .def("createEdgeDetector", &Pipeline::create) ; + } diff --git a/src/pipeline/PipelineBindings.hpp b/src/pipeline/PipelineBindings.hpp index 9df795126..3e759ddd8 100644 --- a/src/pipeline/PipelineBindings.hpp +++ b/src/pipeline/PipelineBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct PipelineBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; diff --git a/src/py_bindings.cpp b/src/py_bindings.cpp index ad7cdbc26..75bcf9ec7 100644 --- a/src/py_bindings.cpp +++ b/src/py_bindings.cpp @@ -39,18 +39,25 @@ PYBIND11_MODULE(depthai,m) m.attr("__build_datetime__") = DEPTHAI_PYTHON_BUILD_DATETIME; // Add bindings - CommonBindings::bind(m); - DatatypeBindings::bind(m); - LogBindings::bind(m); - DataQueueBindings::bind(m); - OpenVINOBindings::bind(m); - AssetManagerBindings::bind(m); - PipelineBindings::bind(m); - NodeBindings::bind(m); - 
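Taken together, the node submodule registration and the generic create binding above change how pipelines are assembled from Python. A minimal sketch of the intended usage follows, assuming the generic create call accepts the classes now exposed under dai.node; the stream name is illustrative.

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()

    # New style: node classes live under dai.node and are created generically
    mono = pipeline.create(dai.node.MonoCamera)
    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName('mono')                 # illustrative stream name
    mono.out.link(xout.input)

    # The per-node helpers kept above (e.g. createEdgeDetector) remain available
    edge = pipeline.createEdgeDetector()
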
XLinkConnectionBindings::bind(m); - DeviceBindings::bind(m); - DeviceBootloaderBindings::bind(m); - CalibrationHandlerBindings::bind(m); + std::deque callstack; + callstack.push_front(&DatatypeBindings::bind); + callstack.push_front(&LogBindings::bind); + callstack.push_front(&DataQueueBindings::bind); + callstack.push_front(&OpenVINOBindings::bind); + callstack.push_front(&AssetManagerBindings::bind); + callstack.push_front(&NodeBindings::bind); + callstack.push_front(&PipelineBindings::bind); + callstack.push_front(&XLinkConnectionBindings::bind); + callstack.push_front(&DeviceBindings::bind); + callstack.push_front(&DeviceBootloaderBindings::bind); + callstack.push_front(&CalibrationHandlerBindings::bind); + // end of the callstack + callstack.push_front([](py::module&, void*){}); + + Callstack callstackAdapter(callstack); + + // Initial call + CommonBindings::bind(m, &callstackAdapter); // Call dai::initialize on 'import depthai' to initialize asap with additional information to print dai::initialize(std::string("Python bindings - version: ") + DEPTHAI_PYTHON_VERSION + " from " + DEPTHAI_PYTHON_COMMIT_DATETIME + " build: " + DEPTHAI_PYTHON_BUILD_DATETIME); diff --git a/src/pybind11_common.hpp b/src/pybind11_common.hpp index 8add9f012..8fbab8dd8 100644 --- a/src/pybind11_common.hpp +++ b/src/pybind11_common.hpp @@ -10,6 +10,7 @@ #include #include #include +#include // Include docstring file #include "docstring.hpp" @@ -24,4 +25,6 @@ namespace pybind11 { namespace detail { namespace py = pybind11; +using StackFunction = void (*)(pybind11::module &m, void *pCallstack); +using Callstack = std::stack; diff --git a/src/utility/ResourcesBindings.cpp b/src/utility/ResourcesBindings.cpp index dd6d4c8df..0c87866ce 100644 --- a/src/utility/ResourcesBindings.cpp +++ b/src/utility/ResourcesBindings.cpp @@ -3,13 +3,13 @@ // depthai // include resources #include "depthai/" -void ResourcesBindings::bind(pybind11::module& m){ +void ResourcesBindings::bind(pybind11::module& m, void* pCallstack){ using namespace dai; // Bind Resources (if needed) py::class_>(m, "Resources") - .def(py::init([](){ + .def(py::init([](){ return std::unique_ptr>(&Resources::getInstance()); }); diff --git a/src/utility/ResourcesBindings.hpp b/src/utility/ResourcesBindings.hpp index e5af26db1..ac36b73db 100644 --- a/src/utility/ResourcesBindings.hpp +++ b/src/utility/ResourcesBindings.hpp @@ -4,5 +4,5 @@ #include "pybind11_common.hpp" struct ResourcesBindings { - static void bind(pybind11::module& m); + static void bind(pybind11::module& m, void* pCallstack); }; \ No newline at end of file From eb60e7f05d0f25fced6c4501d8593ce050ab8aa0 Mon Sep 17 00:00:00 2001 From: Martin Peterlin Date: Tue, 27 Jul 2021 16:31:15 +0200 Subject: [PATCH 36/40] Added documentation for bootloader_version example --- docs/source/samples/bootloader_version.rst | 32 ++++++++++++++++++++++ docs/source/tutorials/simple_samples.rst | 2 ++ 2 files changed, 34 insertions(+) create mode 100644 docs/source/samples/bootloader_version.rst diff --git a/docs/source/samples/bootloader_version.rst b/docs/source/samples/bootloader_version.rst new file mode 100644 index 000000000..c45c5ecc2 --- /dev/null +++ b/docs/source/samples/bootloader_version.rst @@ -0,0 +1,32 @@ +Bootloader Version +================== + +This example shows basic bootloader interaction, retrieving the version of bootloader running on the device. + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. 
+    .. tab:: Python
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../examples/bootloader_version.py
+           :language: python
+           :linenos:
+
+    .. tab:: C++
+
+        Also `available on GitHub `__
+
+        .. literalinclude:: ../../../depthai-core/examples/src/bootloader_version.cpp
+           :language: cpp
+           :linenos:
+
+.. include:: /includes/footer-short.rst
diff --git a/docs/source/tutorials/simple_samples.rst b/docs/source/tutorials/simple_samples.rst
index 69853f745..9e86efb33 100644
--- a/docs/source/tutorials/simple_samples.rst
+++ b/docs/source/tutorials/simple_samples.rst
@@ -23,6 +23,7 @@ Simple
    ../samples/imu_rotation_vector.rst
    ../samples/edge_detector.rst
    ../samples/script_camera_control.rst
+   ../samples/bootloader_version.rst
 
 These samples are great starting point for the gen2 API.
 
@@ -40,3 +41,4 @@ These samples are great starting point for the gen2 API.
 - :ref:`Mono & MobilenetSSD` - Runs MobileNetSSD on mono frames and displays detections on the frame
 - :ref:`Video & MobilenetSSD` - Runs MobileNetSSD on the video from the host
 - :ref:`Edge detector` - Edge detection on input frame
+- :ref:`Bootloader Version` - Retrieves Version of Bootloader on the device

From eb35106a8ccd0c9de19a7e3e2460fc1cdf2742a9 Mon Sep 17 00:00:00 2001
From: Martin Peterlin
Date: Wed, 28 Jul 2021 17:24:36 +0200
Subject: [PATCH 37/40] Added C++14 flag for pybind11_mkdoc

---
 cmake/pybind11-mkdoc.cmake | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/cmake/pybind11-mkdoc.cmake b/cmake/pybind11-mkdoc.cmake
index 2a9b7425a..271926d08 100644
--- a/cmake/pybind11-mkdoc.cmake
+++ b/cmake/pybind11-mkdoc.cmake
@@ -56,6 +56,8 @@ function(pybind11_mkdoc_setup_internal target output_path mkdoc_headers enforce)
             # Docstring wrap width
             -w 80
             -o "${output_path}"
+            # C++ standard
+            -std=c++14
             # List of include directories
             "-I$,;-I>"
             # List of compiler definitions

From d320303c753a6490c402b1487d9962fbf1b9ccad Mon Sep 17 00:00:00 2001
From: Erol444
Date: Thu, 29 Jul 2021 12:43:22 +0200
Subject: [PATCH 38/40] Added demo output to the bootloader docs

---
 docs/source/components/bootloader.rst      |  2 ++
 docs/source/samples/bootloader_version.rst | 15 +++++++++++++--
 docs/source/tutorials/code_samples.rst     |  1 +
 3 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/docs/source/components/bootloader.rst b/docs/source/components/bootloader.rst
index b7a58c87c..a318184a5 100644
--- a/docs/source/components/bootloader.rst
+++ b/docs/source/components/bootloader.rst
@@ -36,6 +36,8 @@ or update the bootloader itself.
 progressCb parameter takes a callback function, which will be called each time an progress update occurs (rate limited to 1 second). This is mainly
 used to inform the user of the current flashing progress.
 
+You can also check the version of the current bootloader by using the :ref:`Bootloader Version` example.
+
 DepthAI Application Package (.dap)
 ##################################
diff --git a/docs/source/samples/bootloader_version.rst b/docs/source/samples/bootloader_version.rst
index c45c5ecc2..a568f0b50 100644
--- a/docs/source/samples/bootloader_version.rst
+++ b/docs/source/samples/bootloader_version.rst
@@ -3,6 +3,17 @@ Bootloader Version
 
 This example shows basic bootloader interaction, retrieving the version of bootloader running on the device.
 
+Demo
+####
+
+Example script output
+
+.. code-block:: bash
+
+    ~/depthai-python/examples$ python3 bootloader_version.py
+    Found device with name: 14442C1031425FD700-ma2480
+    Version: 0.0.12
+
 Setup
 #####
 
 .. include:: /includes/install_from_pypi.rst
 
 Source code
 ###########
 
 .. tabs::
 
     .. tab:: Python
 
-        Also `available on GitHub `__
+        Also `available on GitHub `__
 
         .. literalinclude:: ../../../examples/bootloader_version.py
            :language: python
            :linenos:
 
     .. tab:: C++
 
-        Also `available on GitHub `__
+        Also `available on GitHub `__
 
         .. literalinclude:: ../../../depthai-core/examples/src/bootloader_version.cpp
            :language: cpp
            :linenos:
diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst
index 416e91e97..1488c7693 100644
--- a/docs/source/tutorials/code_samples.rst
+++ b/docs/source/tutorials/code_samples.rst
@@ -31,6 +31,7 @@ Code samples are used for automated testing. They are also a great starting poin
 - :ref:`IMU Rotation Vector` - Rotation vector at 400 hz rate
 - :ref:`Edge detector` - Edge detection on input frame
 - :ref:`Script camera control` - Controlling the camera with the Script node
+- :ref:`Bootloader version` - Retrieves Version of Bootloader on the device
 
 .. rubric:: Complex
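
The two documentation patches above describe what the bootloader_version example does: discover the first available device, open a DeviceBootloader against it, and print the version string shown in the demo output ("Version: 0.0.12"). Below is a minimal sketch of that flow; it assumes the dai.DeviceBootloader.getFirstAvailableDevice(), DeviceInfo.desc.name and DeviceBootloader.getVersion() names behave as in depthai releases from this period, and the shipped examples/bootloader_version.py remains the authoritative reference.

.. code-block:: python

    import depthai as dai

    # Ask XLink for the first device, whether it is unbooted or already running the bootloader
    (found, device_info) = dai.DeviceBootloader.getFirstAvailableDevice()

    if found:
        print(f"Found device with name: {device_info.desc.name}")
        # Connect to the bootloader on that device and query its version
        bl = dai.DeviceBootloader(device_info)
        print(f"Version: {bl.getVersion()}")
    else:
        print("No device found to query...")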

From 6f7a58e7bbec056bdc9d49ff66549db3f7fc2500 Mon Sep 17 00:00:00 2001
From: Martin Peterlin
Date: Fri, 6 Aug 2021 21:57:46 +0200
Subject: [PATCH 39/40] Added Script Properties alias

---
 depthai-core                  |  2 +-
 src/pipeline/NodeBindings.cpp | 13 ++++++++++++-
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/depthai-core b/depthai-core
index b7a05f7ea..ad8be024d 160000
--- a/depthai-core
+++ b/depthai-core
@@ -1 +1 @@
-Subproject commit b7a05f7ea89ea9397b54d9ae5e4e6c922661e9cc
+Subproject commit ad8be024d2e55136f12b72e4d40b831736ee7230
diff --git a/src/pipeline/NodeBindings.cpp b/src/pipeline/NodeBindings.cpp
index 1d8e23603..b54ed085a 100644
--- a/src/pipeline/NodeBindings.cpp
+++ b/src/pipeline/NodeBindings.cpp
@@ -147,7 +147,7 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){
     // Move properties into nodes and nodes under 'node' submodule
     daiNodeModule = m.def_submodule("node");
 
-
+    // Properties
     py::class_<ColorCameraProperties> colorCameraProperties(m, "ColorCameraProperties", DOC(dai, ColorCameraProperties));
     py::enum_<ColorCameraProperties::SensorResolution> colorCameraPropertiesSensorResolution(colorCameraProperties, "SensorResolution", DOC(dai, ColorCameraProperties, SensorResolution));
     py::enum_<ColorCameraProperties::ColorOrder> colorCameraPropertiesColorOrder(colorCameraProperties, "ColorOrder", DOC(dai, ColorCameraProperties, ColorOrder));
@@ -178,6 +178,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){
     py::enum_<Node::Input::Type> nodeInputType(pyInput, "Type");
     py::class_<Node::Output> pyOutput(pyNode, "Output", DOC(dai, Node, Output));
     py::enum_<Node::Output::Type> nodeOutputType(pyOutput, "Type");
+    py::class_<ScriptProperties> scriptProperties(m, "ScriptProperties", DOC(dai, ScriptProperties));
+
+
     // Node::Id bindings
     py::class_<Node::Id>(pyNode, "Id", "Node identificator. Unique for every node on a single Pipeline");
     // Node::Connection bindings
@@ -452,6 +455,13 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){
         .def_readwrite("numFrames", &SPIInProperties::numFrames)
         ;
 
+    // Script properties
+    scriptProperties
+        .def_readwrite("scriptUri", &ScriptProperties::scriptUri, DOC(dai, ScriptProperties, scriptUri))
+        .def_readwrite("scriptName", &ScriptProperties::scriptName, DOC(dai, ScriptProperties, scriptName))
+        .def_readwrite("processor", &ScriptProperties::processor, DOC(dai, ScriptProperties, processor))
+        ;
+
     ////////////////////////////////////////////////////////////////////////////////////////
     ////////////////////////////////////////////////////////////////////////////////////////
@@ -985,6 +995,7 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){
         .def("setProcessor", &Script::setProcessor, DOC(dai, node, Script, setProcessor))
        .def("getProcessor", &Script::getProcessor, DOC(dai, node, Script, getProcessor))
         ;
+    daiNodeModule.attr("Script").attr("Properties") = scriptProperties;
 
     // IMU node
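
The alias added in the patch above makes the ScriptProperties type reachable both as dai.ScriptProperties and as dai.node.Script.Properties from Python. A small usage sketch follows; the script body and the LEON_CSS processor choice are illustrative assumptions rather than part of the patch.

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()

    # Create a Script node through the generic Pipeline.create(<node class>) entry point
    script = pipeline.create(dai.node.Script)
    script.setProcessor(dai.ProcessorType.LEON_CSS)
    script.setScript("""
    import time
    while True:
        time.sleep(1)
    """)

    # After this patch both names refer to the same bound properties class
    assert dai.node.Script.Properties is dai.ScriptProperties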

From 1c392869b7ecd8e1420c3026788bc4ab333a5fc1 Mon Sep 17 00:00:00 2001
From: Martin Peterlin
Date: Fri, 6 Aug 2021 22:28:40 +0200
Subject: [PATCH 40/40] Updated core

---
 depthai-core | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/depthai-core b/depthai-core
index ad8be024d..22233219b 160000
--- a/depthai-core
+++ b/depthai-core
@@ -1 +1 @@
-Subproject commit ad8be024d2e55136f12b72e4d40b831736ee7230
+Subproject commit 22233219b50f4918fe9cbb4c0c0ec232383eb78f