Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
47 commits
Select commit Hold shift + click to select a range
9b29607
POC: Feature tracker node
SzabolcsGergely Jul 6, 2021
2bd58af
Do tracking on 1280x800
SzabolcsGergely Jul 6, 2021
90fc5c1
Add FeatureDetector node; update example with trajectory
SzabolcsGergely Jul 9, 2021
24c361b
Feature tracking on stereo frames
SzabolcsGergely Jul 20, 2021
5622fd7
Merge remote-tracking branch 'origin/develop' into feature_tracker
SzabolcsGergely Jul 20, 2021
4fa9006
Update core
SzabolcsGergely Jul 21, 2021
e834ab4
Add config fields to feature tracker node
SzabolcsGergely Jul 22, 2021
8e015ea
Merge remote-tracking branch 'origin/develop' into feature_tracker
SzabolcsGergely Jul 25, 2021
57c284c
Add bindings for feature tracker
SzabolcsGergely Jul 25, 2021
5e94eca
Add overloaded functions to disable optical flow
SzabolcsGergely Jul 25, 2021
feca58b
Add corner detector example: Harris or Shi-Thomasi
SzabolcsGergely Jul 25, 2021
f18ab42
Add configurable shave/memory resources to feature tracker
SzabolcsGergely Jul 26, 2021
0517a1a
Sync python-cpp examples
SzabolcsGergely Jul 26, 2021
f323fb5
Rename FeatureTrackerData to TrackedFeatures
SzabolcsGergely Jul 27, 2021
37c1ce8
Update core
SzabolcsGergely Jul 27, 2021
97ddf28
Add support for hardware accelerated motion estimation
SzabolcsGergely Jul 29, 2021
e04d9e7
Rename feature tracker config fields
SzabolcsGergely Jul 29, 2021
6ceb906
Refactor FeatureTrackerConfig
SzabolcsGergely Jul 30, 2021
1dee705
Merge remote-tracking branch 'origin/develop' into feature_tracker
SzabolcsGergely Jul 30, 2021
b6f93d8
Update core
SzabolcsGergely Jul 31, 2021
209927a
Update bindings
SzabolcsGergely Aug 2, 2021
b0093c7
Update core
SzabolcsGergely Aug 2, 2021
8079d78
Added python bindings for DeviceBase
themarpe Aug 3, 2021
116762a
Modified documentation entries for DeviceBase and Device
themarpe Aug 5, 2021
2d65bcc
Update examples; update core
SzabolcsGergely Aug 6, 2021
92ee091
Add missing properties bindings
SzabolcsGergely Aug 6, 2021
3f04b03
Merge branch 'main' into develop
themarpe Aug 9, 2021
6ed91f8
Merge remote-tracking branch 'origin/develop' into HEAD
SzabolcsGergely Aug 10, 2021
30221f4
Update core to latest develop
SzabolcsGergely Aug 10, 2021
8366a5d
Merge pull request #317 from luxonis/feature_tracker
SzabolcsGergely Aug 10, 2021
d6bd4a4
Add workaround for stereo subpixel/extended mode crash at the expense…
SzabolcsGergely Aug 11, 2021
36959ec
Merge pull request #343 from luxonis/stereo_crash_workaround
SzabolcsGergely Aug 11, 2021
df60753
Merge branch 'deviceBase' into develop
themarpe Aug 12, 2021
1b9ae25
Merge branch 'main' into develop
themarpe Aug 17, 2021
bd207ce
Update core with bilateral fix for subpixel
SzabolcsGergely Aug 17, 2021
48a76eb
Pipeline: add setXlinkChunkSize
alex-luxonis Aug 17, 2021
7de407b
Fix naming `setXlinkChunkSize` -> `setXLinkChunkSize`
alex-luxonis Aug 18, 2021
e44938b
DeviceBindings: add `{set/get}XLinkChunkSize` RPC calls
alex-luxonis Aug 23, 2021
8b80c99
Add some missing Device() constructor overloads - no-pipeline versions
alex-luxonis Aug 23, 2021
9faa1d5
Device(): rename param for consistency: `deviceDesc` -> `devInfo`
alex-luxonis Aug 23, 2021
8a5c50d
Add python version and platform checks before installing requirements…
VanDavv Aug 24, 2021
23fb52a
Remove for now docstrings for some Device() constructors,
alex-luxonis Aug 24, 2021
dd6f40d
Add back docstrings pointing to DeviceBase
alex-luxonis Aug 24, 2021
c5fc08a
Merge pull request #346 from luxonis/xlink_chunk_size
alex-luxonis Aug 24, 2021
e97a0a0
Merge remote-tracking branch 'origin/main' into HEAD
SzabolcsGergely Aug 24, 2021
b555f05
Update core
SzabolcsGergely Aug 24, 2021
9b24e44
Bump version to 2.10.0.0
SzabolcsGergely Aug 24, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion depthai-core
2 changes: 2 additions & 0 deletions examples/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -127,3 +127,5 @@ add_python_example(imu_rotation_vector imu_rotation_vector.py)
add_python_example(rgb_depth_aligned rgb_depth_aligned.py)
add_python_example(edge_detector edge_detector.py)
add_python_example(script_camera_control script_camera_control.py)
add_python_example(feature_tracker feature_tracker.py)
add_python_example(corner_detector corner_detector.py)
105 changes: 105 additions & 0 deletions examples/corner_detector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
#!/usr/bin/env python3
"""Corner detector example.

Streams tracked corner features from both mono cameras of an OAK device and
draws them on the passthrough frames. Press 's' to toggle between the Harris
and Shi-Thomasi corner detectors at runtime (the new config is sent to both
FeatureTracker nodes over an XLinkIn queue); press 'q' to quit.
"""

import cv2
import depthai as dai


# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Disable optical flow — this example only demonstrates corner detection
featureTrackerLeft.initialConfig.setMotionEstimator(False)
featureTrackerRight.initialConfig.setMotionEstimator(False)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

# Baseline config; mutated at runtime and re-sent through the XLinkIn queue
featureTrackerConfig = featureTrackerRight.initialConfig.get()

print("Press 's' to switch between Harris and Shi-Thomasi corner detector!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    rightWindowName = "right"

    def drawFeatures(frame, features):
        """Draw each detected feature as a filled circle on `frame` (in place)."""
        pointColor = (0, 0, 255)
        circleRadius = 2
        for feature in features:
            cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, pointColor, -1, cv2.LINE_AA, 0)

    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        # Mono frames are grayscale; convert to BGR so colored overlays show
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        drawFeatures(leftFrame, trackedFeaturesLeft)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        drawFeatures(rightFrame, trackedFeaturesRight)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            # Toggle the corner detector type and push the updated config
            if featureTrackerConfig.cornerDetector.type == dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.SHI_THOMASI
                print("Switching to Shi-Thomasi")
            else:
                featureTrackerConfig.cornerDetector.type = dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS
                print("Switching to Harris")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
173 changes: 173 additions & 0 deletions examples/feature_tracker.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,173 @@
#!/usr/bin/env python3

import cv2
import depthai as dai
from collections import deque

class FeatureTrackerDrawer:
    """Tracks and draws the recent motion trail of each feature.

    For every feature id reported by the FeatureTracker node, a bounded
    deque of past positions is kept. drawFeatures() renders each trail as
    a polyline ending in a dot on the current position. An OpenCV trackbar
    adjusts the trail length (shared across all instances via the class
    attribute trackedFeaturesPathLength).
    """

    lineColor = (200, 0, 200)        # BGR color of the motion trail
    pointColor = (0, 0, 255)         # BGR color of the current-position dot
    circleRadius = 2
    maxTrackedFeaturesPathLength = 30
    # for how many frames the feature is tracked (trackbar-adjustable)
    trackedFeaturesPathLength = 10

    # Per-instance state; initialized in __init__ (class-level defaults kept
    # for compatibility with the original definition)
    trackedIDs = None
    trackedFeaturesPath = None

    def __init__(self, trackbarName, windowName):
        """Create the OpenCV window and the trail-length trackbar."""
        self.trackbarName = trackbarName
        self.windowName = windowName
        cv2.namedWindow(windowName)
        cv2.createTrackbar(trackbarName, windowName,
                           FeatureTrackerDrawer.trackedFeaturesPathLength,
                           FeatureTrackerDrawer.maxTrackedFeaturesPathLength,
                           self.onTrackBar)
        self.trackedIDs = set()
        self.trackedFeaturesPath = dict()

    def onTrackBar(self, val):
        """Trackbar callback: update the shared trail length."""
        FeatureTrackerDrawer.trackedFeaturesPathLength = val

    def trackFeaturePath(self, features):
        """Append the current position of each feature to its trail.

        Trails of features that are no longer tracked are discarded; each
        remaining trail is trimmed to trackedFeaturesPathLength points
        (but never below one).
        """
        newTrackedIDs = set()
        for currentFeature in features:
            currentID = currentFeature.id
            newTrackedIDs.add(currentID)

            if currentID not in self.trackedFeaturesPath:
                self.trackedFeaturesPath[currentID] = deque()

            path = self.trackedFeaturesPath[currentID]
            path.append(currentFeature.position)
            # Trim the trail to the configured length (at least one point)
            while len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength):
                path.popleft()

        # Drop trails of features that disappeared this frame
        for staleID in self.trackedIDs - newTrackedIDs:
            self.trackedFeaturesPath.pop(staleID)

        self.trackedIDs = newTrackedIDs

    def drawFeatures(self, img):
        """Draw every tracked feature's trail and current position onto `img` (in place)."""
        cv2.setTrackbarPos(self.trackbarName, self.windowName,
                           FeatureTrackerDrawer.trackedFeaturesPathLength)

        for path in self.trackedFeaturesPath.values():
            for j in range(len(path) - 1):
                src = (int(path[j].x), int(path[j].y))
                dst = (int(path[j + 1].x), int(path[j + 1].y))
                cv2.line(img, src, dst, self.lineColor, 1, cv2.LINE_AA, 0)
            if path:  # guard against an empty trail
                last = path[-1]
                cv2.circle(img, (int(last.x), int(last.y)), self.circleRadius,
                           self.pointColor, -1, cv2.LINE_AA, 0)


# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)
featureTrackerRight = pipeline.create(dai.node.FeatureTracker)

xoutPassthroughFrameLeft = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesLeft = pipeline.create(dai.node.XLinkOut)
xoutPassthroughFrameRight = pipeline.create(dai.node.XLinkOut)
xoutTrackedFeaturesRight = pipeline.create(dai.node.XLinkOut)
xinTrackedFeaturesConfig = pipeline.create(dai.node.XLinkIn)

xoutPassthroughFrameLeft.setStreamName("passthroughFrameLeft")
xoutTrackedFeaturesLeft.setStreamName("trackedFeaturesLeft")
xoutPassthroughFrameRight.setStreamName("passthroughFrameRight")
xoutTrackedFeaturesRight.setStreamName("trackedFeaturesRight")
xinTrackedFeaturesConfig.setStreamName("trackedFeaturesConfig")

# Properties
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

# Linking
monoLeft.out.link(featureTrackerLeft.inputImage)
featureTrackerLeft.passthroughInputImage.link(xoutPassthroughFrameLeft.input)
featureTrackerLeft.outputFeatures.link(xoutTrackedFeaturesLeft.input)
xinTrackedFeaturesConfig.out.link(featureTrackerLeft.inputConfig)

monoRight.out.link(featureTrackerRight.inputImage)
featureTrackerRight.passthroughInputImage.link(xoutPassthroughFrameRight.input)
featureTrackerRight.outputFeatures.link(xoutTrackedFeaturesRight.input)
xinTrackedFeaturesConfig.out.link(featureTrackerRight.inputConfig)

# By default the least amount of resources are allocated;
# increasing it improves performance
numShaves = 2
numMemorySlices = 2
featureTrackerLeft.setHardwareResources(numShaves, numMemorySlices)
featureTrackerRight.setHardwareResources(numShaves, numMemorySlices)

# Baseline config; mutated at runtime and re-sent through the XLinkIn queue
featureTrackerConfig = featureTrackerRight.initialConfig.get()
print("Press 's' to switch between Lucas-Kanade optical flow and hardware accelerated motion estimation!")

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues used to receive the results
    passthroughImageLeftQueue = device.getOutputQueue("passthroughFrameLeft", 8, False)
    outputFeaturesLeftQueue = device.getOutputQueue("trackedFeaturesLeft", 8, False)
    passthroughImageRightQueue = device.getOutputQueue("passthroughFrameRight", 8, False)
    outputFeaturesRightQueue = device.getOutputQueue("trackedFeaturesRight", 8, False)

    inputFeatureTrackerConfigQueue = device.getInputQueue("trackedFeaturesConfig")

    leftWindowName = "left"
    leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

    rightWindowName = "right"
    rightFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", rightWindowName)

    while True:
        inPassthroughFrameLeft = passthroughImageLeftQueue.get()
        passthroughFrameLeft = inPassthroughFrameLeft.getFrame()
        # Mono frames are grayscale; convert to BGR so colored overlays show
        leftFrame = cv2.cvtColor(passthroughFrameLeft, cv2.COLOR_GRAY2BGR)

        inPassthroughFrameRight = passthroughImageRightQueue.get()
        passthroughFrameRight = inPassthroughFrameRight.getFrame()
        rightFrame = cv2.cvtColor(passthroughFrameRight, cv2.COLOR_GRAY2BGR)

        trackedFeaturesLeft = outputFeaturesLeftQueue.get().trackedFeatures
        leftFeatureDrawer.trackFeaturePath(trackedFeaturesLeft)
        leftFeatureDrawer.drawFeatures(leftFrame)

        trackedFeaturesRight = outputFeaturesRightQueue.get().trackedFeatures
        rightFeatureDrawer.trackFeaturePath(trackedFeaturesRight)
        rightFeatureDrawer.drawFeatures(rightFrame)

        # Show the frame
        cv2.imshow(leftWindowName, leftFrame)
        cv2.imshow(rightWindowName, rightFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('s'):
            # Toggle the motion estimator type and push the updated config
            if featureTrackerConfig.motionEstimator.type == dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.HW_MOTION_ESTIMATION
                print("Switching to hardware accelerated motion estimation")
            else:
                featureTrackerConfig.motionEstimator.type = dai.FeatureTrackerConfig.MotionEstimator.Type.LUCAS_KANADE_OPTICAL_FLOW
                print("Switching to Lucas-Kanade optical flow")

            cfg = dai.FeatureTrackerConfig()
            cfg.set(featureTrackerConfig)
            inputFeatureTrackerConfigQueue.send(cfg)
23 changes: 23 additions & 0 deletions examples/install_requirements.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
import platform
import sys, os, subprocess
import argparse
import re
Expand Down Expand Up @@ -45,7 +46,29 @@ def hasWhitespace(string):
# Check if in virtual environment
in_venv = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix)) != sys.prefix
pip_call = [sys.executable, "-m", "pip"]
pip_installed = True
pip_install = pip_call + ["install"]

try:
subprocess.check_call(pip_call + ["--version"])
except subprocess.CalledProcessError as ex:
pip_installed = False

if not pip_installed:
err_str = "Issues with \"pip\" package detected! Follow the official instructions to install - https://pip.pypa.io/en/stable/installation/"
raise RuntimeError(err_str)

if sys.version_info[0] != 3:
raise RuntimeError("Examples require Python 3 to run (detected: Python {})".format(sys.version_info[0]))

if platform.machine() == "arm64" and platform.system() == "Darwin":
err_str = "There are no prebuilt wheels for M1 processors. Please open the following link for a solution - https://discuss.luxonis.com/d/69-running-depthai-on-apple-m1-based-macs"
raise RuntimeError(err_str)

is_pi = platform.machine().startswith("arm") or platform.machine().startswith("aarch")
if is_pi and sys.version_info[1] in (7, 9):
print("[WARNING] There are no prebuilt wheels for Python 3.{} for OpenCV, building process on this device may be long and unstable".format(sys.version_info[1]))

if not in_venv:
pip_install.append("--user")

Expand Down
Loading