diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e4145a19d..e6a02d277 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -538,3 +538,15 @@ jobs:
           repository: luxonis/robothub-apps
           event-type: depthai-python-release
           client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
+
+  notify_hil_workflow_linux_x86_64:
+    needs: [build-linux-x86_64]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Repository Dispatch
+        uses: peter-evans/repository-dispatch@v2
+        with:
+          token: ${{ secrets.HIL_CORE_DISPATCH_TOKEN }}
+          repository: luxonis/depthai-core-hil-tests
+          event-type: python-hil-event
+          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index c6d3c5cc3..98f9cb60a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,6 +38,7 @@ wheelhouse/
 .venv
 env/
 venv/
+venv_*/
 ENV/
 env.bak/
 venv.bak/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 50c01aa89..2183df54f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -107,6 +107,7 @@ pybind11_add_module(${TARGET_NAME}
     src/pipeline/node/XLinkInBindings.cpp
     src/pipeline/node/XLinkOutBindings.cpp
     src/pipeline/node/ColorCameraBindings.cpp
+    src/pipeline/node/CameraBindings.cpp
     src/pipeline/node/MonoCameraBindings.cpp
     src/pipeline/node/StereoDepthBindings.cpp
     src/pipeline/node/NeuralNetworkBindings.cpp
diff --git a/depthai-core b/depthai-core
index 690b6a637..48484a573 160000
--- a/depthai-core
+++ b/depthai-core
@@ -1 +1 @@
-Subproject commit 690b6a637942cd56959c549c8e4be07aee676385
+Subproject commit 48484a5731b99699461c6ca34317063ba6ea2608
diff --git a/examples/Camera/camera_isp.py b/examples/Camera/camera_isp.py
new file mode 100755
index 000000000..5173d56e9
--- /dev/null
+++ b/examples/Camera/camera_isp.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import time
+
+# Connect to device and start pipeline
+with dai.Device() as device:
+    # Device name
+    print('Device name:', device.getDeviceName())
+    # Bootloader version
+    if device.getBootloaderVersion() is not None:
+        print('Bootloader version:', device.getBootloaderVersion())
+    # Print out usb speed
+    print('Usb speed:', device.getUsbSpeed().name)
+    # Connected cameras
+    print('Connected cameras:', device.getConnectedCameraFeatures())
+
+    # Create pipeline
+    pipeline = dai.Pipeline()
+    cams = device.getConnectedCameraFeatures()
+    streams = []
+    for cam in cams:
+        print(str(cam), str(cam.socket), cam.socket)
+        c = pipeline.create(dai.node.Camera)
+        x = pipeline.create(dai.node.XLinkOut)
+        c.isp.link(x.input)
+        c.setBoardSocket(cam.socket)
+        stream = str(cam.socket)
+        if cam.name:
+            stream = f'{cam.name} ({stream})'
+        x.setStreamName(stream)
+        streams.append(stream)
+
+    # Start pipeline
+    device.startPipeline(pipeline)
+    fpsCounter = {}
+    lastFpsCount = {}
+    tfps = time.time()
+    while not device.isClosed():
+        queueNames = device.getQueueEvents(streams)
+        for stream in queueNames:
+            messages = device.getOutputQueue(stream).tryGetAll()
+            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
+            for message in messages:
+                # Display arrived frames
+                if type(message) == dai.ImgFrame:
+                    # render fps
+                    fps = lastFpsCount.get(stream, 0)
+                    frame = message.getCvFrame()
+                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
+                    cv2.imshow(stream, frame)
+
+        if time.time() - tfps >= 1.0:
+            scale = time.time() - tfps
+            for stream in fpsCounter.keys():
+                lastFpsCount[stream] = fpsCounter[stream] / scale
+            fpsCounter = {}
+            tfps = time.time()
+
+        if cv2.waitKey(1) == ord('q'):
+            break
diff --git a/examples/Camera/camera_preview.py b/examples/Camera/camera_preview.py
new file mode 100755
index 000000000..ba9aa108a
--- /dev/null
+++ b/examples/Camera/camera_preview.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import time
+
+# Connect to device and start pipeline
+with dai.Device(dai.OpenVINO.DEFAULT_VERSION, dai.UsbSpeed.SUPER_PLUS) as device:
+    # Device name
+    print('Device name:', device.getDeviceName())
+    # Bootloader version
+    if device.getBootloaderVersion() is not None:
+        print('Bootloader version:', device.getBootloaderVersion())
+    # Print out usb speed
+    print('Usb speed:', device.getUsbSpeed().name)
+    # Connected cameras
+    print('Connected cameras:', device.getConnectedCameraFeatures())
+
+    # Create pipeline
+    pipeline = dai.Pipeline()
+    cams = device.getConnectedCameraFeatures()
+    streams = []
+    for cam in cams:
+        print(str(cam), str(cam.socket), cam.socket)
+        c = pipeline.create(dai.node.Camera)
+        x = pipeline.create(dai.node.XLinkOut)
+        c.preview.link(x.input)
+        c.setBoardSocket(cam.socket)
+        stream = str(cam.socket)
+        if cam.name:
+            stream = f'{cam.name} ({stream})'
+        x.setStreamName(stream)
+        streams.append(stream)
+
+    # Start pipeline
+    device.startPipeline(pipeline)
+    fpsCounter = {}
+    lastFpsCount = {}
+    tfps = time.time()
+    while not device.isClosed():
+        queueNames = device.getQueueEvents(streams)
+        for stream in queueNames:
+            messages = device.getOutputQueue(stream).tryGetAll()
+            fpsCounter[stream] = fpsCounter.get(stream, 0.0) + len(messages)
+            for message in messages:
+                # Display arrived frames
+                if type(message) == dai.ImgFrame:
+                    # render fps
+                    fps = lastFpsCount.get(stream, 0)
+                    frame = message.getCvFrame()
+                    cv2.putText(frame, "Fps: {:.2f}".format(fps), (10, 10), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255,255,255))
+                    cv2.imshow(stream, frame)
+
+        if time.time() - tfps >= 1.0:
+            scale = time.time() - tfps
+            for stream in fpsCounter.keys():
+                lastFpsCount[stream] = fpsCounter[stream] / scale
+            fpsCounter = {}
+            tfps = time.time()
+
+        if cv2.waitKey(1) == ord('q'):
+            break
diff --git a/examples/ColorCamera/rgb_preview.py b/examples/ColorCamera/rgb_preview.py
index a07e464df..0cdc49b63 100755
--- a/examples/ColorCamera/rgb_preview.py
+++ b/examples/ColorCamera/rgb_preview.py
@@ -23,7 +23,7 @@
 
 # Connect to device and start pipeline
 with dai.Device(pipeline) as device:
-    print('Connected cameras:', device.getConnectedCameras())
+    print('Connected cameras:', device.getConnectedCameraFeatures())
     # Print out usb speed
     print('Usb speed:', device.getUsbSpeed().name)
     # Bootloader version
diff --git a/examples/ImageManip/image_manip_warp_mesh.py b/examples/ImageManip/image_manip_warp_mesh.py
index 3eeaa43da..eadff4c77 100644
--- a/examples/ImageManip/image_manip_warp_mesh.py
+++ b/examples/ImageManip/image_manip_warp_mesh.py
@@ -12,7 +12,7 @@
 maxFrameSize = camRgb.getPreviewWidth() * camRgb.getPreviewHeight() * 3
 
 # Warp preview frame 1
-manip1 = pipeline.create(dai.node.Warp)
+manip1 = pipeline.create(dai.node.ImageManip)
 # Create a custom warp mesh
 tl = dai.Point2f(20, 20)
 tr = dai.Point2f(460, 20)
diff --git a/examples/StereoDepth/depth_colormap.py b/examples/StereoDepth/depth_colormap.py
new file mode 100755
index 000000000..05d86f85c
--- /dev/null
+++ b/examples/StereoDepth/depth_colormap.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import numpy as np
+
+# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
+extended_disparity = False
+# Better accuracy for longer distance, fractional disparity 32-levels:
+subpixel = False
+# Better handling for occlusions:
+lr_check = True
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Define sources and outputs
+monoLeft = pipeline.create(dai.node.MonoCamera)
+monoRight = pipeline.create(dai.node.MonoCamera)
+depth = pipeline.create(dai.node.StereoDepth)
+xout = pipeline.create(dai.node.XLinkOut)
+
+xout.setStreamName("disparity")
+
+# Properties
+monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
+monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+
+# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
+depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
+depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
+depth.setLeftRightCheck(lr_check)
+depth.setExtendedDisparity(extended_disparity)
+depth.setSubpixel(subpixel)
+
+# Create a colormap
+colormap = pipeline.create(dai.node.ImageManip)
+colormap.initialConfig.setColormap(dai.Colormap.STEREO_TURBO, depth.initialConfig.getMaxDisparity())
+colormap.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
+
+# Linking
+monoLeft.out.link(depth.left)
+monoRight.out.link(depth.right)
+depth.disparity.link(colormap.inputImage)
+colormap.out.link(xout.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+
+    # Output queue will be used to get the disparity frames from the outputs defined above
+    q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False)
+
+    while True:
+        inDisparity = q.get()  # blocking call, will wait until new data has arrived
+        frame = inDisparity.getCvFrame()
+        cv2.imshow("disparity", frame)
+
+        if cv2.waitKey(1) == ord('q'):
+            break
\ No newline at end of file
diff --git a/examples/StereoDepth/depth_preview_lr.py b/examples/StereoDepth/depth_preview_lr.py
new file mode 100755
index 000000000..7d62b0302
--- /dev/null
+++ b/examples/StereoDepth/depth_preview_lr.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+
+import cv2
+import depthai as dai
+import numpy as np
+
+# Closer-in minimum depth, disparity range is doubled (from 95 to 190):
+extended_disparity = True
+# Better accuracy for longer distance, fractional disparity 32-levels:
+subpixel = True
+# Better handling for occlusions:
+lr_check = True
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Define sources and outputs
+left = pipeline.create(dai.node.ColorCamera)
+right = pipeline.create(dai.node.ColorCamera)
+depth = pipeline.create(dai.node.StereoDepth)
+xout = pipeline.create(dai.node.XLinkOut)
+xoutl = pipeline.create(dai.node.XLinkOut)
+xoutr = pipeline.create(dai.node.XLinkOut)
+
+xout.setStreamName("disparity")
+xoutl.setStreamName("rectifiedLeft")
+xoutr.setStreamName("rectifiedRight")
+
+# Properties
+left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
+left.setBoardSocket(dai.CameraBoardSocket.LEFT)
+right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P)
+right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+right.setIspScale(2, 3)
+left.setIspScale(2, 3)
+
+
+# Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way)
+depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+# Options: MEDIAN_OFF, KERNEL_3x3, KERNEL_5x5, KERNEL_7x7 (default)
+depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
+depth.setInputResolution(1280, 800)
+depth.setLeftRightCheck(lr_check)
+depth.setExtendedDisparity(extended_disparity)
+depth.setSubpixel(subpixel)
+
+# Linking
+left.isp.link(depth.left)
+right.isp.link(depth.right)
+depth.disparity.link(xout.input)
+depth.rectifiedLeft.link(xoutl.input)
+depth.rectifiedRight.link(xoutr.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+    while not device.isClosed():
+        queueNames = device.getQueueEvents()
+        for q in queueNames:
+            message = device.getOutputQueue(q).get()
+            # Display arrived frames
+            if type(message) == dai.ImgFrame:
+                frame = message.getCvFrame()
+                if 'disparity' in q:
+                    maxDisp = depth.initialConfig.getMaxDisparity()
+                    disp = (frame * (255.0 / maxDisp)).astype(np.uint8)
+                    disp = cv2.applyColorMap(disp, cv2.COLORMAP_JET)
+                    cv2.imshow(q, disp)
+                else:
+                    cv2.imshow(q, frame)
+        if cv2.waitKey(1) == ord('q'):
+            break
\ No newline at end of file
diff --git a/examples/VideoEncoder/disparity_colormap_encoding.py b/examples/VideoEncoder/disparity_colormap_encoding.py
new file mode 100755
index 000000000..09d602b10
--- /dev/null
+++ b/examples/VideoEncoder/disparity_colormap_encoding.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+
+import depthai as dai
+
+# Create pipeline
+pipeline = dai.Pipeline()
+
+# Create left/right mono cameras for Stereo depth
+monoLeft = pipeline.create(dai.node.MonoCamera)
+monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
+
+monoRight = pipeline.create(dai.node.MonoCamera)
+monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
+monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
+
+# Create a node that will produce the depth map
+depth = pipeline.create(dai.node.StereoDepth)
+depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
+depth.initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7)
+depth.setLeftRightCheck(False)
+depth.setExtendedDisparity(False)
+# Subpixel disparity is of UINT16 format, which is unsupported by VideoEncoder
+depth.setSubpixel(False)
+monoLeft.out.link(depth.left)
+monoRight.out.link(depth.right)
+
+# Colormap
+colormap = pipeline.create(dai.node.ImageManip)
+colormap.initialConfig.setColormap(dai.Colormap.TURBO, depth.initialConfig.getMaxDisparity())
+colormap.initialConfig.setFrameType(dai.ImgFrame.Type.NV12)
+
+videoEnc = pipeline.create(dai.node.VideoEncoder)
+# Depth resolution/FPS will be the same as mono resolution/FPS
+videoEnc.setDefaultProfilePreset(monoLeft.getFps(), dai.VideoEncoderProperties.Profile.H264_HIGH)
+
+# Link
+depth.disparity.link(colormap.inputImage)
+colormap.out.link(videoEnc.input)
+
+xout = pipeline.create(dai.node.XLinkOut)
+xout.setStreamName("enc")
+videoEnc.bitstream.link(xout.input)
+
+# Connect to device and start pipeline
+with dai.Device(pipeline) as device:
+
+    # Output queue will be used to get the encoded data from the output defined above
+    q = device.getOutputQueue(name="enc")
+
+    # The .h264 file is a raw stream file (not playable yet)
+    with open('disparity.h264', 'wb') as videoFile:
+        print("Press Ctrl+C to stop encoding...")
+        try:
+            while True:
+                videoFile.write(q.get().getData())
+        except KeyboardInterrupt:
+            # Keyboard interrupt (Ctrl + C) detected
+            pass
+
+    print("To view the encoded data, convert the stream file (.h264) into a video file (.mp4) using the command below:")
+    print("ffmpeg -framerate 30 -i disparity.h264 -c copy video.mp4")
diff --git a/examples/device/device_all_boot_bootloader.py b/examples/device/device_all_boot_bootloader.py
new file mode 100644
index 000000000..4c2e9a56b
--- /dev/null
+++ b/examples/device/device_all_boot_bootloader.py
@@ -0,0 +1,6 @@
+import depthai as dai
+
+devices = dai.Device.getAllConnectedDevices()
+
+for device in devices:
+    dai.XLinkConnection.bootBootloader(device)
diff --git a/examples/device/device_boot_non_exclusive.py b/examples/device/device_boot_non_exclusive.py
new file mode 100644
index 000000000..898b292f6
--- /dev/null
+++ b/examples/device/device_boot_non_exclusive.py
@@ -0,0 +1,10 @@
+import depthai as dai
+import time
+
+cfg = dai.Device.Config()
+cfg.nonExclusiveMode = True
+
+with dai.Device(cfg) as device:
+    while not device.isClosed():
+        print('CPU usage:', device.getLeonCssCpuUsage().average)
+        time.sleep(1)
\ No newline at end of file
diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp
index f001781d5..a61e3e770 100644
--- a/src/DeviceBindings.cpp
+++ b/src/DeviceBindings.cpp
@@ -2,6 +2,7 @@
 
 // depthai
 #include "depthai/device/Device.hpp"
+#include "depthai/device/EepromError.hpp"
 #include "depthai/pipeline/Pipeline.hpp"
 #include "depthai/utility/Clock.hpp"
 #include "depthai/xlink/XLinkConnection.hpp"
@@ -288,14 +289,24 @@ static void bindConstructors(ARG& arg){
         }), py::arg("config"), py::arg("deviceInfo"), DOC(dai, DeviceBase, DeviceBase, 19))
 
         // DeviceInfo version
-        .def(py::init([](const DeviceInfo& deviceInfo){
+        .def(py::init([](const DeviceInfo& deviceInfo){
             py::gil_scoped_release release;
             return std::make_unique<D>(deviceInfo);
         }), py::arg("deviceInfo"), DOC(dai, DeviceBase, DeviceBase, 20))
-        .def(py::init([](const DeviceInfo& deviceInfo, UsbSpeed maxUsbSpeed){
+        .def(py::init([](const DeviceInfo& deviceInfo, UsbSpeed maxUsbSpeed){
             py::gil_scoped_release release;
             return std::make_unique<D>(deviceInfo, maxUsbSpeed);
         }), py::arg("deviceInfo"), py::arg("maxUsbSpeed"), DOC(dai, DeviceBase, DeviceBase, 21))
+
+        // name or device id version
+        .def(py::init([](std::string nameOrDeviceId){
+            py::gil_scoped_release release;
+            return std::make_unique<D>(std::move(nameOrDeviceId));
+        }), py::arg("nameOrDeviceId"), DOC(dai, DeviceBase, DeviceBase, 22))
+        .def(py::init([](std::string nameOrDeviceId, UsbSpeed maxUsbSpeed){
+            py::gil_scoped_release release;
+            return std::make_unique<D>(std::move(nameOrDeviceId), maxUsbSpeed);
+        }), py::arg("nameOrDeviceId"), py::arg("maxUsbSpeed"), DOC(dai, DeviceBase, DeviceBase, 23))
     ;
 }
@@ -328,6 +339,13 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){
     py::bind_map_patched<std::unordered_map<std::int8_t, dai::BoardConfig::UART>>(boardConfig, "UARTMap");
 
+    // pybind11 limitation of having actual classes as exceptions
+    // Possible but requires a larger workaround
+    // https://stackoverflow.com/questions/62087383/how-can-you-bind-exceptions-with-custom-fields-and-constructors-in-pybind11-and
+
+    // Bind EepromError
+    auto eepromError = py::register_exception<EepromError>(m, "EepromError", PyExc_RuntimeError);
+
     ///////////////////////////////////////////////////////////////////////
     ///////////////////////////////////////////////////////////////////////
     ///////////////////////////////////////////////////////////////////////
@@ -451,6 +469,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){
         .def(py::init<>())
         .def_readwrite("version", &Device::Config::version)
.def_readwrite("board", &Device::Config::board) + .def_readwrite("nonExclusiveMode", &Device::Config::nonExclusiveMode) ; // Bind constructors @@ -536,7 +555,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("flashFactoryEepromClear", [](DeviceBase& d) { py::gil_scoped_release release; d.flashFactoryEepromClear(); }, DOC(dai, DeviceBase, flashFactoryEepromClear)) .def("setTimesync", [](DeviceBase& d, std::chrono::milliseconds p, int s, bool r) { py::gil_scoped_release release; return d.setTimesync(p,s,r); }, DOC(dai, DeviceBase, setTimesync)) .def("setTimesync", [](DeviceBase& d, bool e) { py::gil_scoped_release release; return d.setTimesync(e); }, py::arg("enable"), DOC(dai, DeviceBase, setTimesync, 2)) - .def("getDeviceName", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceName(); }, DOC(dai, DeviceBase, getDeviceName)) + .def("getDeviceName", [](DeviceBase& d) { std::string name; { py::gil_scoped_release release; name = d.getDeviceName(); } return py::bytes(name).attr("decode")("utf-8", "replace"); }, DOC(dai, DeviceBase, getDeviceName)) ; diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index 952014582..2072555d8 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -19,6 +19,11 @@ #include "depthai-shared/common/DetectionParserOptions.hpp" #include "depthai-shared/common/RotatedRect.hpp" #include "depthai-shared/common/Rect.hpp" +#include "depthai-shared/common/Colormap.hpp" + +// depthai +#include "depthai/common/CameraFeatures.hpp" +#include "depthai/common/CameraExposureOffset.hpp" void CommonBindings::bind(pybind11::module& m, void* pCallstack){ @@ -47,6 +52,8 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ py::class_ detectionParserOptions(m, "DetectionParserOptions", DOC(dai, DetectionParserOptions)); py::class_ rotatedRect(m, "RotatedRect", DOC(dai, RotatedRect)); py::class_ rect(m, "Rect", DOC(dai, Rect)); + py::enum_ cameraExposureOffset(m, "CameraExposureOffset"); + py::enum_ colormap(m, "Colormap", DOC(dai, Colormap)); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -162,6 +169,12 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("orientation", &CameraFeatures::orientation) .def_readwrite("supportedTypes", &CameraFeatures::supportedTypes) .def_readwrite("hasAutofocus", &CameraFeatures::hasAutofocus) + .def_readwrite("name", &CameraFeatures::name) + .def("__repr__", [](CameraFeatures& camera) { + std::stringstream stream; + stream << camera; + return stream.str(); + }); ; // MemoryInfo @@ -281,4 +294,38 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("iouThreshold", &DetectionParserOptions::iouThreshold) ; + cameraExposureOffset + .value("START", CameraExposureOffset::START) + .value("MIDDLE", CameraExposureOffset::MIDDLE) + .value("END", CameraExposureOffset::END) + ; + + colormap + .value("NONE", Colormap::NONE) + .value("JET", Colormap::JET) + .value("TURBO", Colormap::TURBO) + .value("STEREO_JET", Colormap::STEREO_JET) + .value("STEREO_TURBO", Colormap::STEREO_TURBO) + // .value("AUTUMN", Colormap::AUTUMN) + // .value("BONE", Colormap::BONE) + // .value("WINTER", Colormap::WINTER) + // .value("RAINBOW", Colormap::RAINBOW) + // .value("OCEAN", Colormap::OCEAN) + // .value("SUMMER", Colormap::SUMMER) + // .value("SPRING", Colormap::SPRING) + // .value("COOL", Colormap::COOL) + 
// .value("HSV", Colormap::HSV) + // .value("PINK", Colormap::PINK) + // .value("HOT", Colormap::HOT) + // .value("PARULA", Colormap::PARULA) + // .value("MAGMA", Colormap::MAGMA) + // .value("INFERNO", Colormap::INFERNO) + // .value("PLASMA", Colormap::PLASMA) + // .value("VIRIDIS", Colormap::VIRIDIS) + // .value("CIVIDIS", Colormap::CIVIDIS) + // .value("TWILIGHT", Colormap::TWILIGHT) + // .value("TWILIGHT_SHIFTED", Colormap::TWILIGHT_SHIFTED) + // .value("DEEPGREEN", Colormap::DEEPGREEN) + ; + } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 22ea646dd..cfdd9f588 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -10,6 +10,7 @@ #include "depthai/pipeline/node/XLinkOut.hpp" #include "depthai/pipeline/node/NeuralNetwork.hpp" #include "depthai/pipeline/node/ColorCamera.hpp" +#include "depthai/pipeline/node/Camera.hpp" #include "depthai/pipeline/node/VideoEncoder.hpp" #include "depthai/pipeline/node/SPIOut.hpp" #include "depthai/pipeline/node/SPIIn.hpp" diff --git a/src/pipeline/datatype/ImageManipConfigBindings.cpp b/src/pipeline/datatype/ImageManipConfigBindings.cpp index 1ead6d0cc..847d69d5a 100644 --- a/src/pipeline/datatype/ImageManipConfigBindings.cpp +++ b/src/pipeline/datatype/ImageManipConfigBindings.cpp @@ -111,7 +111,10 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("setResize", static_cast)>(&ImageManipConfig::setResize), py::arg("size"), DOC(dai, ImageManipConfig, setResize, 2)) .def("setResizeThumbnail", static_cast(&ImageManipConfig::setResizeThumbnail), py::arg("w"), py::arg("h"), py::arg("bgRed")=0, py::arg("bgGreen")=0, py::arg("bgBlue")=0, DOC(dai, ImageManipConfig, setResizeThumbnail)) .def("setResizeThumbnail", static_cast, int, int, int)>(&ImageManipConfig::setResizeThumbnail), py::arg("size"), py::arg("bgRed")=0, py::arg("bgGreen")=0, py::arg("bgBlue")=0, DOC(dai, ImageManipConfig, setResizeThumbnail, 2)) - .def("setFrameType", &ImageManipConfig::setFrameType, py::arg("name"), DOC(dai, ImageManipConfig, setFrameType)) + .def("setFrameType", &ImageManipConfig::setFrameType, py::arg("type"), DOC(dai, ImageManipConfig, setFrameType)) + .def("setColormap", static_cast(&ImageManipConfig::setColormap), py::arg("colormap"), py::arg("min"), py::arg("max"), DOC(dai, ImageManipConfig, setColormap)) + .def("setColormap", static_cast(&ImageManipConfig::setColormap), py::arg("colormap"), py::arg("max") = 255, DOC(dai, ImageManipConfig, setColormap)) + .def("setColormap", static_cast(&ImageManipConfig::setColormap), py::arg("colormap"), py::arg("max") = 255.0f, DOC(dai, ImageManipConfig, setColormap)) .def("setHorizontalFlip", &ImageManipConfig::setHorizontalFlip, py::arg("flip"), DOC(dai, ImageManipConfig, setHorizontalFlip)) .def("setVerticalFlip", &ImageManipConfig::setVerticalFlip, py::arg("flip"), DOC(dai, ImageManipConfig, setVerticalFlip)) .def("setReusePreviousImage", &ImageManipConfig::setReusePreviousImage, py::arg("reuse"), DOC(dai, ImageManipConfig, setReusePreviousImage)) @@ -129,6 +132,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("getResizeConfig", &ImageManipConfig::getResizeConfig, DOC(dai, ImageManipConfig, getResizeConfig)) .def("getFormatConfig", &ImageManipConfig::getFormatConfig, DOC(dai, ImageManipConfig, getFormatConfig)) .def("isResizeThumbnail", &ImageManipConfig::isResizeThumbnail, DOC(dai, ImageManipConfig, isResizeThumbnail)) + .def("getColormap", &ImageManipConfig::getColormap, DOC(dai, ImageManipConfig, 
getColormap)) ; diff --git a/src/pipeline/datatype/ImgFrameBindings.cpp b/src/pipeline/datatype/ImgFrameBindings.cpp index ee867395b..bc2c8b768 100644 --- a/src/pipeline/datatype/ImgFrameBindings.cpp +++ b/src/pipeline/datatype/ImgFrameBindings.cpp @@ -117,8 +117,10 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ imgFrame .def(py::init<>()) // getters - .def("getTimestamp", &ImgFrame::getTimestamp, DOC(dai, ImgFrame, getTimestamp)) - .def("getTimestampDevice", &ImgFrame::getTimestampDevice, DOC(dai, ImgFrame, getTimestampDevice)) + .def("getTimestamp", py::overload_cast<>(&ImgFrame::getTimestamp, py::const_), DOC(dai, ImgFrame, getTimestamp)) + .def("getTimestampDevice", py::overload_cast<>(&ImgFrame::getTimestampDevice, py::const_), DOC(dai, ImgFrame, getTimestampDevice)) + .def("getTimestamp", py::overload_cast(&ImgFrame::getTimestamp, py::const_), py::arg("offset"), DOC(dai, ImgFrame, getTimestamp)) + .def("getTimestampDevice", py::overload_cast(&ImgFrame::getTimestampDevice, py::const_), py::arg("offset"), DOC(dai, ImgFrame, getTimestampDevice)) .def("getInstanceNum", &ImgFrame::getInstanceNum, DOC(dai, ImgFrame, getInstanceNum)) .def("getCategory", &ImgFrame::getCategory, DOC(dai, ImgFrame, getCategory)) .def("getSequenceNum", &ImgFrame::getSequenceNum, DOC(dai, ImgFrame, getSequenceNum)) diff --git a/src/pipeline/node/CameraBindings.cpp b/src/pipeline/node/CameraBindings.cpp new file mode 100644 index 000000000..421d0a7b9 --- /dev/null +++ b/src/pipeline/node/CameraBindings.cpp @@ -0,0 +1,164 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/Camera.hpp" + +void bind_camera(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ cameraProperties(m, "CameraProperties", DOC(dai, CameraProperties)); + py::enum_ cameraPropertiesWarpMeshSource(cameraProperties, "WarpMeshSource", DOC(dai, CameraProperties, WarpMeshSource)); + py::enum_ cameraPropertiesColorOrder(cameraProperties, "ColorOrder", DOC(dai, CameraProperties, ColorOrder)); + auto camera = ADD_NODE(Camera); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // // Camera Properties + // cameraPropertiesSensorResolution + // .value("THE_1080_P", CameraProperties::SensorResolution::THE_1080_P) + // .value("THE_1200_P", CameraProperties::SensorResolution::THE_1200_P) + // .value("THE_4_K", CameraProperties::SensorResolution::THE_4_K) + // .value("THE_5_MP", CameraProperties::SensorResolution::THE_5_MP) + // .value("THE_12_MP", CameraProperties::SensorResolution::THE_12_MP) + // .value("THE_4000X3000", CameraProperties::SensorResolution::THE_4000X3000) + // .value("THE_13_MP", CameraProperties::SensorResolution::THE_13_MP) + // .value("THE_5312X6000", CameraProperties::SensorResolution::THE_5312X6000) + // 
.value("THE_48_MP", CameraProperties::SensorResolution::THE_48_MP) + // .value("THE_720_P", CameraProperties::SensorResolution::THE_720_P) + // .value("THE_800_P", CameraProperties::SensorResolution::THE_800_P) + // ; + + // Camera Properties - WarpMeshSource + cameraPropertiesWarpMeshSource + .value("AUTO", CameraProperties::WarpMeshSource::AUTO) + .value("NONE", CameraProperties::WarpMeshSource::NONE) + .value("CALIBRATION", CameraProperties::WarpMeshSource::CALIBRATION) + .value("URI", CameraProperties::WarpMeshSource::URI) + ; + + cameraPropertiesColorOrder + .value("BGR", CameraProperties::ColorOrder::BGR) + .value("RGB", CameraProperties::ColorOrder::RGB) + ; + + cameraProperties + .def_readwrite("initialControl", &CameraProperties::initialControl) + .def_readwrite("boardSocket", &CameraProperties::boardSocket) + .def_readwrite("imageOrientation", &CameraProperties::imageOrientation) + .def_readwrite("colorOrder", &CameraProperties::colorOrder) + .def_readwrite("interleaved", &CameraProperties::interleaved) + .def_readwrite("fp16", &CameraProperties::fp16) + .def_readwrite("previewHeight", &CameraProperties::previewHeight) + .def_readwrite("previewWidth", &CameraProperties::previewWidth) + .def_readwrite("videoHeight", &CameraProperties::videoHeight) + .def_readwrite("videoWidth", &CameraProperties::videoWidth) + .def_readwrite("stillHeight", &CameraProperties::stillHeight) + .def_readwrite("stillWidth", &CameraProperties::stillWidth) + // .def_readwrite("resolution", &CameraProperties::resolution) + .def_readwrite("fps", &CameraProperties::fps) + .def_readwrite("sensorCropX", &CameraProperties::sensorCropX) + .def_readwrite("sensorCropY", &CameraProperties::sensorCropY) + .def_readwrite("previewKeepAspectRatio", &CameraProperties::previewKeepAspectRatio) + .def_readwrite("ispScale", &CameraProperties::ispScale) + + .def_readwrite("numFramesPoolRaw", &CameraProperties::numFramesPoolRaw) + .def_readwrite("numFramesPoolIsp", &CameraProperties::numFramesPoolIsp) + .def_readwrite("numFramesPoolVideo", &CameraProperties::numFramesPoolVideo) + .def_readwrite("numFramesPoolPreview", &CameraProperties::numFramesPoolPreview) + .def_readwrite("numFramesPoolStill", &CameraProperties::numFramesPoolStill) + + .def_readwrite("warpMeshSource", &CameraProperties::warpMeshSource) + .def_readwrite("warpMeshUri", &CameraProperties::warpMeshUri) + .def_readwrite("warpMeshWidth", &CameraProperties::warpMeshWidth) + .def_readwrite("warpMeshHeight", &CameraProperties::warpMeshHeight) + .def_readwrite("calibAlpha", &CameraProperties::calibAlpha) + .def_readwrite("warpMeshStepWidth", &CameraProperties::warpMeshStepWidth) + .def_readwrite("warpMeshStepHeight", &CameraProperties::warpMeshStepHeight) + ; + + // Camera node + camera + .def_readonly("inputConfig", &Camera::inputConfig, DOC(dai, node, Camera, inputConfig)) + .def_readonly("inputControl", &Camera::inputControl, DOC(dai, node, Camera, inputControl)) + .def_readonly("initialControl", &Camera::initialControl, DOC(dai, node, Camera, initialControl)) + .def_readonly("video", &Camera::video, DOC(dai, node, Camera, video)) + .def_readonly("preview", &Camera::preview, DOC(dai, node, Camera, preview)) + .def_readonly("still", &Camera::still, DOC(dai, node, Camera, still)) + .def_readonly("isp", &Camera::isp, DOC(dai, node, Camera, isp)) + .def_readonly("raw", &Camera::raw, DOC(dai, node, Camera, raw)) + .def_readonly("frameEvent", &Camera::frameEvent, DOC(dai, node, Camera, frameEvent)) + // .def_readonly("mockIsp", &Camera::mockIsp, DOC(dai, node, Camera, 
mockIsp)) + .def("setBoardSocket", &Camera::setBoardSocket, py::arg("boardSocket"), DOC(dai, node, Camera, setBoardSocket)) + .def("getBoardSocket", &Camera::getBoardSocket, DOC(dai, node, Camera, getBoardSocket)) + .def("setImageOrientation", &Camera::setImageOrientation, py::arg("imageOrientation"), DOC(dai, node, Camera, setImageOrientation)) + .def("getImageOrientation", &Camera::getImageOrientation, DOC(dai, node, Camera, getImageOrientation)) + .def("setPreviewSize", static_cast(&Camera::setPreviewSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setPreviewSize)) + .def("setPreviewSize", static_cast)>(&Camera::setPreviewSize), py::arg("size"), DOC(dai, node, Camera, setPreviewSize, 2)) + .def("setVideoSize", static_cast(&Camera::setVideoSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setVideoSize)) + .def("setVideoSize", static_cast)>(&Camera::setVideoSize), py::arg("size"), DOC(dai, node, Camera, setVideoSize, 2)) + .def("setStillSize", static_cast(&Camera::setStillSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setStillSize)) + .def("setStillSize", static_cast)>(&Camera::setStillSize), py::arg("size"), DOC(dai, node, Camera, setStillSize, 2)) + // .def("setResolution", &Camera::setResolution, py::arg("resolution"), DOC(dai, node, Camera, setResolution)) + // .def("getResolution", &Camera::getResolution, DOC(dai, node, Camera, getResolution)) + .def("setFps", &Camera::setFps, py::arg("fps"), DOC(dai, node, Camera, setFps)) + .def("getFps", &Camera::getFps, DOC(dai, node, Camera, getFps)) + .def("getPreviewSize", &Camera::getPreviewSize, DOC(dai, node, Camera, getPreviewSize)) + .def("getPreviewWidth", &Camera::getPreviewWidth, DOC(dai, node, Camera, getPreviewWidth)) + .def("getPreviewHeight", &Camera::getPreviewHeight, DOC(dai, node, Camera, getPreviewHeight)) + .def("getVideoSize", &Camera::getVideoSize, DOC(dai, node, Camera, getVideoSize)) + .def("getVideoWidth", &Camera::getVideoWidth, DOC(dai, node, Camera, getVideoWidth)) + .def("getVideoHeight", &Camera::getVideoHeight, DOC(dai, node, Camera, getVideoHeight)) + .def("getStillSize", &Camera::getStillSize, DOC(dai, node, Camera, getStillSize)) + .def("getStillWidth", &Camera::getStillWidth, DOC(dai, node, Camera, getStillWidth)) + .def("getStillHeight", &Camera::getStillHeight, DOC(dai, node, Camera, getStillHeight)) + .def("getSize", &Camera::getSize, DOC(dai, node, Camera, getSize)) + .def("getWidth", &Camera::getWidth, DOC(dai, node, Camera, getWidth)) + .def("getHeight", &Camera::getHeight, DOC(dai, node, Camera, getHeight)) + // .def("sensorCenterCrop", &Camera::sensorCenterCrop, DOC(dai, node, Camera, sensorCenterCrop)) + // .def("setSensorCrop", &Camera::setSensorCrop, py::arg("x"), py::arg("y"), DOC(dai, node, Camera, setSensorCrop)) + // .def("getSensorCrop", &Camera::getSensorCrop, DOC(dai, node, Camera, getSensorCrop)) + // .def("getSensorCropX", &Camera::getSensorCropX, DOC(dai, node, Camera, getSensorCropX)) + // .def("getSensorCropY", &Camera::getSensorCropY, DOC(dai, node, Camera, getSensorCropY)) + + // .def("setIspScale", static_cast(&Camera::setIspScale), py::arg("numerator"), py::arg("denominator"), DOC(dai, node, Camera, setIspScale)) + // .def("setIspScale", static_cast)>(&Camera::setIspScale), py::arg("scale"), DOC(dai, node, Camera, setIspScale, 2)) + // .def("setIspScale", static_cast(&Camera::setIspScale), py::arg("horizNum"), py::arg("horizDenom"), py::arg("vertNum"), py::arg("vertDenom"), DOC(dai, node, Camera, setIspScale, 3)) + // 
.def("setIspScale", static_cast,std::tuple)>(&Camera::setIspScale), py::arg("horizScale"), py::arg("vertScale"), DOC(dai, node, Camera, setIspScale, 4)) + + .def("setCamera", &Camera::setCamera, py::arg("name"), DOC(dai, node, Camera, setCamera)) + .def("getCamera", &Camera::getCamera, DOC(dai, node, Camera, getCamera)) + + .def("setSize", static_cast(&Camera::setSize), py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setSize)) + .def("setSize", static_cast)>(&Camera::setSize), py::arg("size"), DOC(dai, node, Camera, setSize, 2)) + + + .def("setMeshSource", &Camera::setMeshSource, py::arg("source"), DOC(dai, node, Camera, setMeshSource)) + .def("getMeshSource", &Camera::getMeshSource, DOC(dai, node, Camera, getMeshSource)) + .def("loadMeshFile", &Camera::loadMeshFile, py::arg("warpMesh"), DOC(dai, node, Camera, loadMeshFile)) + .def("loadMeshData", &Camera::loadMeshData, py::arg("warpMesh"), DOC(dai, node, Camera, loadMeshData)) + .def("setMeshStep", &Camera::setMeshStep, py::arg("width"), py::arg("height"), DOC(dai, node, Camera, setMeshStep)) + .def("getMeshStep", &Camera::getMeshStep, DOC(dai, node, Camera, getMeshStep)) + .def("setCalibrationAlpha", &Camera::setCalibrationAlpha, py::arg("alpha"), DOC(dai, node, Camera, setCalibrationAlpha)) + .def("getCalibrationAlpha", &Camera::getCalibrationAlpha, DOC(dai, node, Camera, getCalibrationAlpha)) + + ; + // ALIAS + daiNodeModule.attr("Camera").attr("Properties") = cameraProperties; + +} diff --git a/src/pipeline/node/ColorCameraBindings.cpp b/src/pipeline/node/ColorCameraBindings.cpp index e1e8eb060..143e3c5ff 100644 --- a/src/pipeline/node/ColorCameraBindings.cpp +++ b/src/pipeline/node/ColorCameraBindings.cpp @@ -42,6 +42,7 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .value("THE_48_MP", ColorCameraProperties::SensorResolution::THE_48_MP) .value("THE_720_P", ColorCameraProperties::SensorResolution::THE_720_P) .value("THE_800_P", ColorCameraProperties::SensorResolution::THE_800_P) + .value("THE_1440X1080", ColorCameraProperties::SensorResolution::THE_1440X1080) ; colorCameraPropertiesColorOrder @@ -73,6 +74,7 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def_readwrite("numFramesPoolVideo", &ColorCameraProperties::numFramesPoolVideo) .def_readwrite("numFramesPoolPreview", &ColorCameraProperties::numFramesPoolPreview) .def_readwrite("numFramesPoolStill", &ColorCameraProperties::numFramesPoolStill) + .def_readwrite("eventFilter", &ColorCameraProperties::eventFilter) ; // ColorCamera node @@ -122,6 +124,8 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def("getResolution", &ColorCamera::getResolution, DOC(dai, node, ColorCamera, getResolution)) .def("setFps", &ColorCamera::setFps, py::arg("fps"), DOC(dai, node, ColorCamera, setFps)) .def("getFps", &ColorCamera::getFps, DOC(dai, node, ColorCamera, getFps)) + .def("setFrameEventFilter", &ColorCamera::setFrameEventFilter, py::arg("events"), DOC(dai, node, ColorCamera, setFrameEventFilter)) + .def("getFrameEventFilter", &ColorCamera::getFrameEventFilter, DOC(dai, node, ColorCamera, getFrameEventFilter)) .def("getPreviewSize", &ColorCamera::getPreviewSize, DOC(dai, node, ColorCamera, getPreviewSize)) .def("getPreviewWidth", &ColorCamera::getPreviewWidth, DOC(dai, node, ColorCamera, getPreviewWidth)) .def("getPreviewHeight", &ColorCamera::getPreviewHeight, DOC(dai, node, ColorCamera, getPreviewHeight)) @@ -180,6 +184,8 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def("getStillNumFramesPool", 
&ColorCamera::getStillNumFramesPool, DOC(dai, node, ColorCamera, getStillNumFramesPool)) .def("getRawNumFramesPool", &ColorCamera::getRawNumFramesPool, DOC(dai, node, ColorCamera, getRawNumFramesPool)) .def("getIspNumFramesPool", &ColorCamera::getIspNumFramesPool, DOC(dai, node, ColorCamera, getIspNumFramesPool)) + .def("setCamera", &ColorCamera::setCamera, py::arg("name"), DOC(dai, node, ColorCamera, setCamera)) + .def("getCamera", &ColorCamera::getCamera, DOC(dai, node, ColorCamera, getCamera)) ; // ALIAS daiNodeModule.attr("ColorCamera").attr("Properties") = colorCameraProperties; diff --git a/src/pipeline/node/MonoCameraBindings.cpp b/src/pipeline/node/MonoCameraBindings.cpp index ecd3abfe7..5ff0bf685 100644 --- a/src/pipeline/node/MonoCameraBindings.cpp +++ b/src/pipeline/node/MonoCameraBindings.cpp @@ -45,6 +45,7 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def_readwrite("fps", &MonoCameraProperties::fps) .def_readwrite("numFramesPool", &MonoCameraProperties::numFramesPool) .def_readwrite("numFramesPoolRaw", &MonoCameraProperties::numFramesPoolRaw) + .def_readwrite("eventFilter", &MonoCameraProperties::eventFilter) ; // Node @@ -76,6 +77,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getImageOrientation", &MonoCamera::getImageOrientation, DOC(dai, node, MonoCamera, getImageOrientation)) .def("setResolution", &MonoCamera::setResolution, py::arg("resolution"), DOC(dai, node, MonoCamera, setResolution)) .def("getResolution", &MonoCamera::getResolution, DOC(dai, node, MonoCamera, getResolution)) + .def("setFrameEventFilter", &MonoCamera::setFrameEventFilter, py::arg("events"), DOC(dai, node, MonoCamera, setFrameEventFilter)) + .def("getFrameEventFilter", &MonoCamera::getFrameEventFilter, DOC(dai, node, MonoCamera, getFrameEventFilter)) .def("setFps", &MonoCamera::setFps, py::arg("fps"), DOC(dai, node, MonoCamera, setFps)) .def("getFps", &MonoCamera::getFps, DOC(dai, node, MonoCamera, getFps)) .def("getResolutionSize", &MonoCamera::getResolutionSize, DOC(dai, node, MonoCamera, getResolutionSize)) @@ -85,7 +88,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getNumFramesPool", &MonoCamera::getNumFramesPool, DOC(dai, node, MonoCamera, getNumFramesPool)) .def("setRawNumFramesPool", &MonoCamera::setRawNumFramesPool, DOC(dai, node, MonoCamera, setRawNumFramesPool)) .def("getRawNumFramesPool", &MonoCamera::getRawNumFramesPool, DOC(dai, node, MonoCamera, getRawNumFramesPool)) - + .def("setCamera", &MonoCamera::setCamera, py::arg("name"), DOC(dai, node, MonoCamera, setCamera)) + .def("getCamera", &MonoCamera::getCamera, DOC(dai, node, MonoCamera, getCamera)) ; // ALIAS daiNodeModule.attr("MonoCamera").attr("Properties") = monoCameraProperties; diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index 21a591367..222ca997c 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ -94,6 +94,7 @@ py::class_ bindNodeMap(py::handle scope, const std::string &na void bind_xlinkin(pybind11::module& m, void* pCallstack); void bind_xlinkout(pybind11::module& m, void* pCallstack); void bind_colorcamera(pybind11::module& m, void* pCallstack); +void bind_camera(pybind11::module& m, void* pCallstack); void bind_monocamera(pybind11::module& m, void* pCallstack); void bind_stereodepth(pybind11::module& m, void* pCallstack); void bind_neuralnetwork(pybind11::module& m, void* pCallstack); @@ -122,6 +123,7 @@ void NodeBindings::addToCallstack(std::deque& callstack) { 
callstack.push_front(bind_xlinkin); callstack.push_front(bind_xlinkout); callstack.push_front(bind_colorcamera); + callstack.push_front(bind_camera); callstack.push_front(bind_monocamera); callstack.push_front(bind_stereodepth); callstack.push_front(bind_neuralnetwork); diff --git a/src/pipeline/node/StereoDepthBindings.cpp b/src/pipeline/node/StereoDepthBindings.cpp index de473accd..1ed9e43cb 100644 --- a/src/pipeline/node/StereoDepthBindings.cpp +++ b/src/pipeline/node/StereoDepthBindings.cpp @@ -166,6 +166,8 @@ void bind_stereodepth(pybind11::module& m, void* pCallstack){ }, DOC(dai, node, StereoDepth, setFocalLengthFromCalibration)) .def("useHomographyRectification", &StereoDepth::useHomographyRectification, DOC(dai, node, StereoDepth, useHomographyRectification)) .def("enableDistortionCorrection", &StereoDepth::enableDistortionCorrection, DOC(dai, node, StereoDepth, enableDistortionCorrection)) + .def("setBaseline", &StereoDepth::setBaseline, DOC(dai, node, StereoDepth, setBaseline)) + .def("setFocalLength", &StereoDepth::setFocalLength, DOC(dai, node, StereoDepth, setFocalLength)) ; // ALIAS daiNodeModule.attr("StereoDepth").attr("Properties") = stereoDepthProperties; diff --git a/src/pybind11_common.hpp b/src/pybind11_common.hpp index 6dbc0967b..6478ae5e2 100644 --- a/src/pybind11_common.hpp +++ b/src/pybind11_common.hpp @@ -13,6 +13,7 @@ #include #include #include +#include // Include docstring file #include "docstring.hpp" diff --git a/src/utility/SpanBindings.hpp b/src/utility/SpanBindings.hpp new file mode 100644 index 000000000..859f1df31 --- /dev/null +++ b/src/utility/SpanBindings.hpp @@ -0,0 +1,170 @@ +#pragma once + +#include +#include + +#include "depthai/utility/span.hpp" + +namespace pybind11 { +namespace detail { + +template +struct span_name_maker { + template + static constexpr auto make(const T &t) { + return concat(t, span_name_maker::make(t)); + } +}; + +template <> +struct span_name_maker<1> { + template + static constexpr auto make(const T &t) { + return t; + } +}; + +// span with fixed size converts to a tuple +template struct type_caster> { + using span_type = typename dai::span; + using value_conv = make_caster; + using value_type = typename std::remove_cv::type; + + value_type backing_array[Extent] = {}; + + PYBIND11_TYPE_CASTER(span_type, _("Tuple[") + span_name_maker::make(value_conv::name) + _("]")); + + type_caster() : value(backing_array) {} + + bool load(handle src, bool convert) { + if (!isinstance(src) || isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + if (s.size() != Extent) + return false; + size_t i = 0; + for (auto it : s) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + backing_array[i] = cast_op(std::move(conv)); + i++; + } + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + tuple l(Extent); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal( + value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyTuple_SET_ITEM(l.ptr(), (ssize_t)index++, + value_.release().ptr()); // steals a reference + } + return l.release(); + } +}; + + +// span with dynamic extent +template struct type_caster> { + using span_type = typename dai::span; + using value_conv = make_caster; + using value_type = typename std::remove_cv::type; + PYBIND11_TYPE_CASTER(span_type, _("List[") + value_conv::name + 
_("]")); + + std::vector vec; + bool load(handle src, bool convert) { + if (!isinstance(src) || isinstance(src)) + return false; + auto s = reinterpret_borrow(src); + vec.reserve(s.size()); + for (auto it : s) { + value_conv conv; + if (!conv.load(it, convert)) + return false; + vec.push_back(cast_op(std::move(conv))); + } + value = span_type(vec.data(), vec.size()); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + if (!std::is_lvalue_reference::value) + policy = return_value_policy_override::policy(policy); + list l(src.size()); + size_t index = 0; + for (auto &&value : src) { + auto value_ = reinterpret_steal( + value_conv::cast(forward_like(value), policy, parent)); + if (!value_) + return handle(); + PyList_SET_ITEM(l.ptr(), (ssize_t)index++, + value_.release().ptr()); // steals a reference + } + return l.release(); + } +}; + +// span specialization: accepts any readonly buffers +template <> struct type_caster> { + using span_type = typename dai::span; + PYBIND11_TYPE_CASTER(span_type, _("buffer")); + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto buf = reinterpret_borrow(src); + auto req = buf.request(); + if (req.ndim != 1) { + return false; + } + + value = span_type((const uint8_t*)req.ptr, req.size*req.itemsize); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + return bytes((char*)src.data(), src.size()).release(); + } +}; + +// span specialization: writeable buffer +template <> struct type_caster> { + using span_type = typename dai::span; + PYBIND11_TYPE_CASTER(dai::span, _("buffer")); + + bool load(handle src, bool convert) { + if (!isinstance(src)) + return false; + auto buf = reinterpret_borrow(src); + auto req = buf.request(true); // buffer must be writeable + if (req.ndim != 1) { + return false; + } + + value = dai::span((uint8_t*)req.ptr, req.size*req.itemsize); + return true; + } + +public: + template + static handle cast(T &&src, return_value_policy policy, handle parent) { + // TODO: should this be a memoryview instead? + return bytes((char*)src.data(), src.size()).release(); + } +}; + +} // namespace detail +} // namespace pybind11 \ No newline at end of file diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 36456584a..58fb20bc8 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -41,6 +41,7 @@ import collections import time from itertools import cycle +from pathlib import Path def socket_type_pair(arg): socket, type = arg.split(',') @@ -66,6 +67,8 @@ def socket_type_pair(arg): help="Downscale the ISP output by this factor") parser.add_argument('-rs', '--resizable-windows', action='store_true', help="Make OpenCV windows resizable. 
Note: may introduce some artifacts")
+parser.add_argument('-tun', '--camera-tuning', type=Path,
+                    help="Path to custom camera tuning database")
 args = parser.parse_args()
 
 cam_list = []
@@ -174,11 +177,8 @@ def get(self):
     cam[c].setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG)
     cam[c].setFps(args.fps)
 
-if 0:
-    print("=== Using custom camera tuning, and limiting RGB FPS to 10")
-    pipeline.setCameraTuningBlobPath("/home/user/Downloads/tuning_color_low_light.bin")
-    # TODO: change sensor driver to make FPS automatic (based on requested exposure time)
-    cam['rgb'].setFps(10)
+if args.camera_tuning:
+    pipeline.setCameraTuningBlobPath(str(args.camera_tuning))
 
 # Pipeline is defined, now we can connect to the device
 with dai.Device(pipeline) as device: