main.cpp
#include <opencv2/opencv.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/nonfree/features2d.hpp>
#include <iostream>
#include <ctype.h>
#include "opticalflow.h"
#include "features.h"
#include "morph.h"
using namespace cv;
using namespace std;
#define dense 2 //0: sparse blob-based flow, 1: dense Farneback flow, 2: sparse flow with moving- and static-object passes
#define savevideo 1 //set to 1 to write the output to a video file
#define calcfeat 1 //set to 1 to recompute features in every frame
#define debugon 0 //set to 1 to enable debug print statements
vector<Mat> HogFeats; //Global variable storing the computed HOG features of 200 road images
//extern Rect Rect2;//In this window we have to detect the static object as well.
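/*
init() precomputes HOG descriptors for 200 road image patches (1.png - 200.png) and stores
them in HogFeats. They are presumably consumed by the detection helpers in features.h as a
reference model of road appearance; that use is not visible in this file.
*/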
void init()
{
cout<<"Started Initialization"<<endl;
Mat src1;
Mat gray1;
vector<float> ders1;
for(int i=1;i<=200;i++)
{
stringstream num1;
num1<<i;
string str1 = string("../../../Desktop/Harsha/CMU/SurroundView/datasets/Vehicles/Roads/") +num1.str()+ ".png";
src1 = imread(str1);
if(src1.empty())
continue;//Skip missing or unreadable images rather than crashing in cvtColor
HOGDescriptor hog(Size(96,64), Size(8,8), Size(4,4), Size(4,4), 9);
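//The descriptor above uses a 96x64 window, 8x8 blocks, a 4x4 block stride, 4x4 cells and 9 orientation bins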
cvtColor(src1, gray1, CV_BGR2GRAY);
resize(gray1, gray1, Size(96, 64));
hog.compute(gray1,ders1,Size(0,0), Size(0,0));
Mat A(ders1.size(),1,CV_32FC1);
memcpy(A.data,ders1.data(),ders1.size()*sizeof(float));
HogFeats.push_back(A);
//HogFeats[i]=A.clone();
}
}
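/*
main() pipeline: read frames, difference consecutive grayscale frames, extract features
(Shi-Tomasi or ORB depending on the branch selected by the dense macro), track them with
pyramidal Lucas-Kanade or Farneback flow, and mark moving/static obstacles in the output,
optionally writing the annotated frames to a video file.
*/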
int main( int argc, char** argv )
{
const int MAX_COUNT = 5000;
vector<Point2f> points[2];
vector<uchar> status;
vector<float> err;
Mat prv,next, flow, colflow,imsparse ,colim, diff,prvmul;
Mat binimg;//Used for storing the binary images
Mat featimg;//Used for getting the features
Mat winimg;//img containing the rectangles
Mat gradnext;//Gradient of a frame
Mat blobimg;//Image with the connected components
vector<KeyPoint> feats;
vector < vector<Point2i > > blobs;
int nframes=0;
int i=0;
/*Initializing our system first*/
init();
VideoCapture cap("../../../Desktop/Harsha/CMU/SurroundView/MovingObjectDetection/4.mp4");
if(!cap.isOpened())
{
cout<<"Could not open the input video"<<endl;
return -1;
}
Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),
(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));//Input frame size
VideoWriter outputVideo;
//int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
if(savevideo)
{
if(dense)
outputVideo.open("Track4statmov.avi" , CV_FOURCC('M','J','P','G'), cap.get(CV_CAP_PROP_FPS),S, true);
else
outputVideo.open("StatMov5.avi" , 1, cap.get(CV_CAP_PROP_FPS),S, true);
if (!outputVideo.isOpened())
{
cout << "Could not open the output video for write: " << endl;
return -1;
}
}
//Reading the first frame from the video into the previous frame
if(!(cap.read(prv)))
return 0;
cvtColor(prv, prv, CV_BGR2GRAY);
threshold(prv, binimg, 100, 0, THRESH_BINARY);
//cout<<"The frame size is "<<prv.size()<<prv.rows<<prv.cols<<endl;
//Computing the features to track at the beginning itself
goodFeaturesToTrack(prv, points[0], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
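//Shi-Tomasi corners give an initial set of track points (replaced by ORB points in the branches below when features are recomputed per frame)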
while(true)
{
if(!(cap.read(next)))
break;
if(next.empty())
break;
colim=next.clone();
cvtColor(next, next, CV_BGR2GRAY);
diff=abs(next-prv);
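//Frame differencing: pixels that changed between consecutive frames stay bright in diff, which highlights moving regions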
//imshow("Difference",diff);
#if dense==0
threshold(diff, binimg, 50, 1, THRESH_BINARY);//Low threshold since the input is already a frame difference
//imshow("BinaryImage", binimg);
Scharr(diff, gradnext, CV_64F, 0, 1);//Vertical gradient; Scharr always uses a fixed 3x3 kernel
imshow("GradientImage",gradnext);
//threshold(gradnext, binimg, 50, 255, THRESH_BINARY);//Will have very low
//imgerode(binimg, binimg, 1);
imgdilate(binimg, binimg, 2);
//imgerode(binimg, binimg, 4);
imshow("MorphedImage",binimg);
FindBlobs(binimg,blobimg, blobs);
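//FindBlobs (from morph.h) labels the connected components of the binary mask; each entry of blobs presumably holds the pixels of one component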
cout<<"The number of blobs detected in this frame is"<<blobs.size()<<endl;
/*We will now calculate some features useful for identifying the obstacles*/
if(calcfeat)
{
//showorb(next, featimg,feats);
showorb(diff, featimg,feats);//Computing features on the diff of images.
points[0].clear();
for(i=0;i<feats.size();i++)
{
points[0].push_back(Point2f(feats[i].pt.x,feats[i].pt.y));
}
cout << points[0].size() << endl;
/*Done calculating the features*/
}
//blobs.clear();
extractwindowsrefined(next,blobimg,winimg,points,blobs);
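//extractwindowsrefined (from features.h) presumably draws candidate obstacle windows around the detected blobs and feature points into winimg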
//extractwindows(next,winimg,points);
imshow("WindowsImage",winimg);
calcOpticalFlowPyrLK(
prv, next, // 2 consecutive images
points[0], // input point positions in first im
points[1], // output point positions in the 2nd
status, // tracking success
err // tracking error
);
//cout<<"Optical Flow computed for one frame"<<endl;
drawoptflowsparse(prv,colim,imsparse,points);
imshow("SparseFlow",imsparse);
if(savevideo)
outputVideo.write(winimg);
if (waitKey(5) >= 0)
break;
swap(points[1], points[0]);
#endif
//The dense flow calculation takes a lot of time since it matches every
//pixel in one image to other image.
#if dense==1
calcOpticalFlowFarneback(prv, next, flow, 0.5, 1, 5, 3, 5, 1.2, 0);
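//Farneback parameters: pyramid scale 0.5, 1 level, window size 5, 3 iterations, poly_n 5, poly_sigma 1.2, no flags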
cvtColor(prv, colflow, CV_GRAY2BGR);
drawOptFlowMap(flow, colflow, 20, CV_RGB(0, 255, 0));
cout<<colflow.size()<<endl;
findobst(flow, colflow);
imshow("DenseFlow",colflow);
if(savevideo)
outputVideo.write(colflow);
if (waitKey(5) >= 0)
break;
#endif
#if dense==2
threshold(diff, binimg, 200, 1, THRESH_BINARY);//0/1 mask so the element-wise multiply below keeps the original intensities in changed regions
//imshow("Binary Image",binimg);
if(debugon)
cout<<"Calculated Binary image"<<endl;
prvmul=prv.mul(binimg);
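//Masking the previous frame with the change mask restricts the ORB detector below to regions that moved between frames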
if(debugon)
cout<<"Completed element wise multiplication"<<endl;
//imshow("PreviousImage",prv);
showorb(prvmul, featimg,feats);//Computing ORB features on the masked previous frame
points[0].clear();
points[1].clear();
for(i=0;i<feats.size();i++)
{
points[0].push_back(Point2f(feats[i].pt.x,feats[i].pt.y));
}
if(debugon)
{
cout<<"Computed ORB features"<<prv.size()<<" "<<next.size()<<endl;
cout<<points[0].size()<<" "<<points[1].size()<<endl;
cout<<"The size of the images passed are"<<prv.size()<<" "<<next.size()<<endl;
}
if(points[0].size()>0)
{
calcOpticalFlowPyrLK(
prv, next, // 2 consecutive images
points[0], // input point positions in first im
points[1], // output point positions in the 2nd
status, // tracking success
err // tracking error
);
if(debugon)
cout<<"Computed sparse optical flow features"<<endl;
drawoptflowsparse(prv,colim,imsparse,points,status,err);
findobst(prv, imsparse,imsparse,points,status,err);
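//findobst (from features.h) presumably uses the tracked point displacements to mark moving obstacles in imsparse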
//findstatobst(diff,next);
imshow("SparseFlow",imsparse);
if(debugon)
cout<<"Located moving obstacles"<<endl;
//swap(points[1], points[0]);
}
/*
We will now detect the static objects in the images
*/
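//Second pass: ORB features are taken from the full current frame (not just changed pixels), tracked with LK again, and findstaticobst marks the static obstacles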
showorb(next, featimg,feats);//Computing ORB features on the full current frame for the static-object pass
points[0].clear();
points[1].clear();
for(i=0;i<feats.size();i++)
{
points[0].push_back(Point2f(feats[i].pt.x,feats[i].pt.y));
}
if(points[0].size()>0)
{
calcOpticalFlowPyrLK(
prv, next, // 2 consecutive images
points[0], // input point positions in first im
points[1], // output point positions in the 2nd
status, // tracking success
err // tracking error
);
if(debugon)
cout<<"Computed sparse optical flow features"<<endl;
//drawoptflowsparse(prv,colim,imsparse,points,status,err);
findstaticobst(prv, colim,imsparse,points,status,err);
imshow("StaticObjects",imsparse);
if(debugon)
cout<<"Located static obstacles"<<endl;
//swap(points[1], points[0]);
}
if(savevideo)
outputVideo.write(imsparse);
if (waitKey(5) >= 0)
break;
#endif
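//The current frame becomes the previous frame for the next iteration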
prv = next.clone();
}
return 0;
}