-
Notifications
You must be signed in to change notification settings - Fork 3
/
TrackerOpticalFlow.cpp
189 lines (148 loc) · 6.01 KB
/
TrackerOpticalFlow.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
#include "TrackerOpticalFlow.h"
#include <iostream>
#include <vector>
#include <exception>
#include <opencv2/core/core.hpp>
#include <opencv2/video/video.hpp>
#include "evgUtilities/rectOperations.h"
using namespace std;
using namespace cv;
bool OpticalFlowTracker::init(const cv::Mat& inputFrame, const cv::Rect& inputBox)
{
    try {
        // Remember the tracked box, clamped so the padded region fits the frame.
        refBox = inputBox;
        evg::validateRect(inputFrame.size(), innerDepth, borderDepth, refBox);

        // Keep a padded crop of the input frame as the reference patch
        // that processFrame() will match against.
        const cv::Rect expanded = evg::expandRect(refBox, borderDepth);
        refFrameCrop = inputFrame(expanded).clone();
        return true;
    } catch (...) {
        std::cout << "Error in OpticalFlowTracker::init()." << std::endl;
        throw;
    }
}
// Track the reference box into newFrame using sparse Lucas-Kanade optical
// flow, writing the shifted box to resultBox and re-seeding internal state.
// Returns 1 on success; rethrows any exception after logging.
// NOTE(review): only resultBox.x/.y are assigned here — callers presumably
// pass in a box whose width/height are already correct; confirm.
bool OpticalFlowTracker::processFrame(const cv::Mat& newFrame, cv::Rect& resultBox)
{
    try {
        // Expand the previous box by the border margin; flow is computed in
        // this padded window so motion near the box edge is still observable.
        cv::Rect refBoxExpanded = evg::expandRect(refBox, borderDepth);
        // crop new frame
        cv::Mat newFrameCrop = newFrame(refBoxExpanded).clone();
        // get good points for optical flow tracking
        vector<Point2f> refPoints, newPoints;
        const int maxPoints = 50;
        // Mask limits corner detection to the inner (un-padded) box, so
        // features are not picked from the border band of the expanded crop.
        Mat maskPoints = Mat::zeros(refFrameCrop.size(), CV_8UC1);
        Rect innerRect (Point(borderDepth, borderDepth), refBox.size());
        rectangle(maskPoints, innerRect, Scalar(255), CV_FILLED);
        // Corner detection needs a single-channel image; convert if the
        // reference crop is 3-channel.
        cv::Mat refFrameCropGray = refFrameCrop;
        if (refFrameCrop.type() == CV_8UC3)
        {
            refFrameCropGray = refFrameCropGray.clone();
            cvtColor( refFrameCrop, refFrameCropGray, CV_RGB2GRAY );
        }
        // find points
        goodFeaturesToTrack(refFrameCropGray, refPoints, maxPoints, 0.01, 4, maskPoints);
        // If no corners were found, delta stays all-zero -> box does not move.
        Scalar delta;
        if (refPoints.size() != 0)
        {
            // optical flow
            vector<uchar> pointStatus;
            vector<float> pointErr;
            Size winsizeSq = Size(winsize, winsize);
            // NOTE(review): flow is computed on the original crops (possibly
            // 3-channel) even though corners were detected on the gray image;
            // also the trailing derivLambda argument matches only the old
            // OpenCV 2.x signature — confirm against the OpenCV version used.
            calcOpticalFlowPyrLK(refFrameCrop, newFrameCrop, refPoints, newPoints,
                pointStatus, pointErr, winsizeSq, maxLevel,
                TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
                derivLambda);
            // Per-point displacement; points whose tracking status is 0 keep
            // the default (0,0) displacement and are removed by the magnitude
            // filter below (unless every point has zero displacement).
            vector<Point2f> deltaVect (refPoints.size());
            for (unsigned long i = 0; i != refPoints.size(); ++i)
                if (pointStatus[i])
                    deltaVect[i] = newPoints[i] - refPoints[i];
            // filtering non-moving points
            vector<double> absVect (deltaVect.size());
            double maxAbsVect = 0;
            for (unsigned int i = 0; i != deltaVect.size(); ++i)
            {
                // get flow norm
                absVect[i] = norm(deltaVect[i]);
                // update max flow norm
                if (absVect[i] > maxAbsVect)
                    maxAbsVect = absVect[i];
            }
            // Iterate backwards so erase() does not shift indices still to visit.
            for (long i = absVect.size()-1; i != -1; --i)
                // drop every point moving slower than thresMovePerc of the max
                if (absVect[i] < thresMovePerc * maxAbsVect)
                {
                    // erase element
                    deltaVect.erase(deltaVect.begin() + i);
                    absVect.erase(absVect.begin() + i);
                }
            // Mean displacement of the surviving (fast-moving) points.
            delta = cv::mean(deltaVect);
        }
        int dx = round(double(delta[0]));
        int dy = round(double(delta[1]));
        // write the result
        resultBox.x = refBox.x + dx;
        resultBox.y = refBox.y + dy;
        // validate result
        evg::validateRect(newFrame.size(), innerDepth, borderDepth, resultBox);
        // Re-seed the reference patch from the new frame at the new location,
        // so the next call tracks relative to this frame.
        cv::Rect resultBoxExpanded = evg::expandRect(resultBox, borderDepth);
        refFrameCrop = newFrame(resultBoxExpanded).clone();
        refBox = resultBox;
        return 1;
    } catch(...) {
        std::cout << "Error in OpticalFlowTracker::processFrame()." << std::endl;
        throw;
    }
}
bool TrackerOpticalFlowFace::init (const Mat& _frame, const Face& _face, void* _anyData)
{
try {
(void)_anyData;
// get rectangles from face
const std::vector< const cv::Rect* > face_parts = _face.asPtrVector();
if ( face_parts.size() > 3 )
{
cerr << "Error in TrackerOpticalFlowFace::init() "
<< "- more than 3 face rectangles." << endl;
return 0;
}
// init every rectangles
for (unsigned int part = 0; part != face_parts.size(); ++part )
if (! trackers[part].init(_frame, *face_parts[part]) ) return 0;
return 1;
} catch(...) {
cerr << "Exception in TrackerOpticalFlowFace::init()." << endl;
return 0;
}
}
bool TrackerOpticalFlowFace::processFrame (const Mat& _frame, Face& _face)
{
try {
// get rectangles from face
std::vector< cv::Rect* > face_parts = _face.asPtrVector();
if ( face_parts.size() > 3 )
{
cerr << "Error in TrackerOpticalFlowFace::processFrame() "
<< "- more than 3 face rectangles." << endl;
return 0;
}
// process every rectangles
for (unsigned int part = 0; part != face_parts.size(); ++part )
if (! trackers[part].processFrame(_frame, *face_parts[part]) ) return 0;
return 1;
} catch(...) {
cerr << "Exception in TrackerOpticalFlowFace::processFrame()." << endl;
return 0;
}
}