PageRenderTime 84ms CodeModel.GetById 25ms RepoModel.GetById 0ms app.codeStats 1ms

/libeyes/camera.cpp

https://github.com/aiwenar/Eyes
C++ | 1383 lines | 1112 code | 143 blank | 128 comment | 243 complexity | 9207fbc2fc595f31095ec6a13f50a19b MD5 | raw file
  1. #include "camera.hxx"
  2. #include "hardware.hxx"
  3. #include "hungarian.h"
  4. #include "core.hxx"
  5. #include <unistd.h>
  6. #include <sys/stat.h>
  7. camcapture ccap;
  8. extern hardware HRDWR;
  9. extern percental cpu;
  10. using namespace cv;
  11. static Mat norm_0_255(InputArray _src) {
  12. Mat src = _src.getMat();
  13. // Create and return normalized image:
  14. Mat dst;
  15. switch(src.channels()) {
  16. case 1:
  17. cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
  18. break;
  19. case 3:
  20. cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
  21. break;
  22. default:
  23. src.copyTo(dst);
  24. break;
  25. }
  26. return dst;
  27. }
  28. IplImage* QImage2IplImage(QImage *qimg)
  29. {
  30. IplImage *imgHeader = cvCreateImageHeader( cvSize(qimg->width(), qimg->height()), IPL_DEPTH_8U, 4);
  31. imgHeader->imageData = (char*) qimg->bits();
  32. uchar* newdata = (uchar*) malloc(sizeof(uchar) * qimg->byteCount());
  33. memcpy(newdata, qimg->bits(), qimg->byteCount());
  34. imgHeader->imageData = (char*) newdata;
  35. return imgHeader;
  36. }
  37. QImage* IplImage2QImage(IplImage *iplImg)
  38. {
  39. int h = iplImg->height;
  40. int w = iplImg->width;
  41. int channels = iplImg->nChannels;
  42. QImage *qimg = new QImage(w, h, QImage::Format_ARGB32);
  43. char *data = iplImg->imageData;
  44. for (int y = 0; y < h; y++, data += iplImg->widthStep)
  45. {
  46. for (int x = 0; x < w; x++)
  47. {
  48. char r, g, b, a = 0;
  49. if (channels == 1)
  50. {
  51. r = data[x * channels];
  52. g = data[x * channels];
  53. b = data[x * channels];
  54. }
  55. else if (channels == 3 || channels == 4)
  56. {
  57. r = data[x * channels + 2];
  58. g = data[x * channels + 1];
  59. b = data[x * channels];
  60. }
  61. if (channels == 4)
  62. {
  63. a = data[x * channels + 3];
  64. qimg->setPixel(x, y, qRgba(r, g, b, a));
  65. }
  66. else
  67. {
  68. qimg->setPixel(x, y, qRgb(r, g, b));
  69. }
  70. }
  71. }
  72. return qimg;
  73. }
  74. bool camcapture::cam_init()
  75. {
  76. cam = NULL;
  77. cam = cvCaptureFromCAM(-1);
  78. if (cam == NULL)
  79. {
  80. cerr << "Could not initialize camera\n";
  81. return 0;
  82. }
  83. src = cvQueryFrame(cam);
  84. if (src == NULL)
  85. {
  86. cerr << "Could not initialize camera\n";
  87. return 0;
  88. }
  89. return 1;
  90. }
  91. IplImage* camcapture::get_image()
  92. {
  93. src = cvQueryFrame(cam);
  94. cvResize(src, resized);
  95. return resized;
  96. }
// Precompute everything the mirror effect needs from a sample source
// frame: the aspect-corrected workspace rectangle, the left/right mirror
// source ROIs (configured in percent, converted here to pixels), blur
// kernel sizes and fisheye distortion strengths.
// Must be called once before processMirror().
void mirror::init_mirrors(IplImage * srcExample)
{
prevMirror = cvCloneImage(srcExample);
double srcw = srcExample->width;
double srch = srcExample->height;
aspect.ST = srcw;
aspect.ND = srch;
if (aspectCorrection.ST != 0 && aspectCorrection.ND != 0)
{
//scale 4:3 aspect to correct aspect to fix picture distortion
aspect.ST *= (aspectCorrection.ND*srcw)/(aspectCorrection.ST*srch);
}
//px data
mirrorL = cvCreateImage(mirrorLsize, prevMirror->depth, prevMirror->nChannels);
mirrorR = cvCreateImage(mirrorRsize, prevMirror->depth, prevMirror->nChannels);
//percentage data form 4:3 rect
// Frame-difference skip threshold, scaled by resolution (8500 apparently
// tuned at 640x480). NOTE(review): if width/height are ints this ratio is
// computed with integer division -- confirm that is intended.
mirrorL2Diff = 8500*((srcExample->width*srcExample->height)/(640*480));
// Fit the largest aspect-correct workspace rectangle inside the frame,
// centring it along whichever axis has slack (letterbox vs pillarbox).
if (srcw/srch < aspect.ST/aspect.ND)
{
mirrorWorkspace.width = srcw;
mirrorWorkspace.height = srcw*(aspect.ND/aspect.ST);
mirrorWorkspace.x = 0;
mirrorWorkspace.y = (srch-mirrorWorkspace.height)/2;
}
else
{
mirrorWorkspace.width = srch*(aspect.ST/aspect.ND);
mirrorWorkspace.height = srch;
mirrorWorkspace.x = (srcw-mirrorWorkspace.width)/2;
mirrorWorkspace.y = 0;
}
cerr << mirrorWorkspace.width << " " << mirrorWorkspace.height << " " << mirrorWorkspace.x << " " << mirrorWorkspace.y << "\n";
cerr << srcw << " " << srch << "\n";
// Convert the mirror ROIs from percentages of the workspace to pixels.
// NOTE(review): the workspace offset is added BEFORE the /100.0, so the
// offset is also divided by 100 -- verify this is the intended formula
// (one would expect (pct*size)/100 + offset).
mirrorLsrc.x = (mirrorLsrc.x*mirrorWorkspace.width + mirrorWorkspace.x)/100.0;
mirrorLsrc.width = (mirrorLsrc.width*mirrorWorkspace.width)/100.0;
mirrorLsrc.y = (mirrorLsrc.y*mirrorWorkspace.height + mirrorWorkspace.y)/100.0;
mirrorLsrc.height = (mirrorLsrc.height*mirrorWorkspace.height)/100.0;
mirrorRsrc.x = (mirrorRsrc.x*mirrorWorkspace.width + mirrorWorkspace.x)/100.0;
mirrorRsrc.width = (mirrorRsrc.width*mirrorWorkspace.width)/100.0;
mirrorRsrc.y = (mirrorRsrc.y*mirrorWorkspace.height + mirrorWorkspace.y)/100.0;
mirrorRsrc.height = (mirrorRsrc.height*mirrorWorkspace.height)/100.0;
// Blur kernel sizes: percent of the mirror size -> pixels.
gaussSizeL.ST = (gaussSizeL.ST*mirrorLsize.width)/100;
gaussSizeL.ND = (gaussSizeL.ND*mirrorLsize.height)/100;
gaussSizeR.ST = (gaussSizeR.ST*mirrorRsize.width)/100;
gaussSizeR.ND = (gaussSizeR.ND*mirrorRsize.height)/100;
// Normalize distortion strength by the squared mirror width so the
// visual effect is resolution-independent.
distortionSize.ST /= (mirrorLsize.width*mirrorLsize.width);
distortionSize.ND /= (mirrorRsize.width*mirrorRsize.width);
}
// Regenerate the left/right mirror images from a fresh camera frame.
// Returns 0 (doing no work) when the frame is too similar to the
// previously processed one (L2 norm below mirrorL2Diff); otherwise flips
// the frame horizontally, extracts and rescales the two mirror ROIs and
// applies the optional fisheye / blur / bloom post-processing chain,
// then returns 1. init_mirrors() must have been called first.
bool mirror::processMirror(IplImage *src)
{
// Skip near-identical frames to save CPU.
if (cvNorm(src, prevMirror) < mirrorL2Diff)
{
//cerr << "skipping - " << cvNorm(src, prevMirror) << " (" << mirrorL2Diff << ")\n";
return 0;
}
//cerr << "not skipping - " << cvNorm(src, prevMirror) << " (" << mirrorL2Diff << ")\n";
// Remember this frame for the next similarity check.
cvReleaseImage(&prevMirror);
prevMirror = cvCloneImage(src);
// Mirror the frame horizontally, then cut out and rescale the two ROIs
// computed by init_mirrors().
IplImage * mirror = cvCreateImage(cvSize(prevMirror->width, prevMirror->height), prevMirror->depth, prevMirror->nChannels);
cvFlip(prevMirror, mirror, 1);
cvSetImageROI(mirror, mirrorLsrc);
cvResize(mirror, mirrorL);
cvResetImageROI(mirror);
cvSetImageROI(mirror, mirrorRsrc);
cvResize(mirror, mirrorR);
cvResetImageROI(mirror);
cvReleaseImage(&mirror);
//IplImage* srcd = cvLoadImage( "./test.png", 1 );
//IplImage* newd;
//newd = cvCloneImage(srcd);
//cvResize(newd, mirrorL);
//srcd = cvLoadImage( "./test.png", 1 );
//newd = cvCloneImage(srcd);
//cvResize(newd, mirrorR);
//fisheye(mirrorL, 0.00001, make_pair(50, 50));
//cvSaveImage("./test2.png",mirrorL,0);
// Optional effects, applied in place on both mirrors.
if (distort)
{
fisheye(mirrorL, distortionSize.ST, make_pair(50, 50));
fisheye(mirrorR, distortionSize.ND, make_pair(50, 50));
}
//cerr << "HIER\n";
if (boxblur)
{
blur(Mat(mirrorL), Mat(mirrorL), cvSize(gaussSizeL.ST, gaussSizeL.ND));
blur(Mat(mirrorR), Mat(mirrorR), cvSize(gaussSizeR.ST, gaussSizeR.ND));
}
if (gaussian)
{
// The "+ (1 - int(...) % 2)" terms round the kernel size up to the
// odd value GaussianBlur requires.
GaussianBlur(Mat(mirrorL), Mat(mirrorL), cvSize(gaussSizeL.ST*2.2 + (1 - int(gaussSizeL.ST*2.2) % 2),
gaussSizeL.ND*2.2 + (1 - int(gaussSizeL.ND*2.2) % 2)), 0, 0);
GaussianBlur(Mat(mirrorR), Mat(mirrorR), cvSize(gaussSizeR.ST*2.2 + (1 - int(gaussSizeR.ST*2.2) % 2),
gaussSizeR.ND*2.2 + (1 - int(gaussSizeR.ND*2.2) % 2)), 0, 0);
}
if (bloom)
{
// Bloom: blend a strongly blurred copy back onto the image. Uses
// whichever blur family (gaussian/box) is enabled above.
Mat bloomL, bloomR;
if (gaussian)
{
GaussianBlur(Mat(mirrorL), bloomL, cvSize(gaussSizeL.ST*15 + (1 - int(gaussSizeL.ST*15) % 2),
gaussSizeL.ND*15 + (1 - int(gaussSizeL.ND*15) % 2)), 0, 0);
GaussianBlur(Mat(mirrorR), bloomR, cvSize(gaussSizeR.ST*15 + (1 - int(gaussSizeR.ST*15) % 2),
gaussSizeR.ND*15 + (1 - int(gaussSizeR.ND*15) % 2)), 0, 0);
addWeighted(Mat(mirrorL), 0.55, bloomL, 0.55, 0, Mat(mirrorL));
addWeighted(Mat(mirrorR), 0.55, bloomR, 0.55, 0, Mat(mirrorR));
}
if (boxblur)
{
blur(Mat(mirrorL), bloomL, cvSize(gaussSizeL.ST*7 + (1 - int(gaussSizeL.ST*7) % 2),
gaussSizeL.ND*7 + (1 - int(gaussSizeL.ND*7) % 2)));
blur(Mat(mirrorR), bloomR, cvSize(gaussSizeR.ST*7 + (1 - int(gaussSizeR.ST*7) % 2),
gaussSizeR.ND*7 + (1 - int(gaussSizeR.ND*7) % 2)));
addWeighted(Mat(mirrorL), 0.55, bloomL, 0.45, 0, Mat(mirrorL));
addWeighted(Mat(mirrorR), 0.55, bloomR, 0.45, 0, Mat(mirrorR));
}
}
//cvSaveImage("./testL.png",mirrorL,0);
//cvSaveImage("./testR.png",mirrorR,0);
// Debug preview windows.
if (ccap.debug)
{
cvNamedWindow( "Source1", 1 );
cvShowImage( "Source1", mirrorL);
cvNamedWindow( "Source2", 1 );
cvShowImage( "Source2", mirrorR);
}
return 1;
}
  224. void mirror::sampleImage(const IplImage *arr, double idx0, double idx1, CvScalar &res)
  225. {
  226. if(idx0<0 || idx1<0 || idx0>(cvGetSize(arr).height-1) || idx1>(cvGetSize(arr).width-1))
  227. {
  228. res.val[0]=0;
  229. res.val[1]=0;
  230. res.val[2]=0;
  231. res.val[3]=0;
  232. return;
  233. }
  234. double idx0_fl=floor(idx0);
  235. double idx0_cl=ceil(idx0);
  236. double idx1_fl=floor(idx1);
  237. double idx1_cl=ceil(idx1);
  238. CvScalar s1=cvGet2D(arr,(int)idx0_fl,(int)idx1_fl);
  239. CvScalar s2=cvGet2D(arr,(int)idx0_fl,(int)idx1_cl);
  240. CvScalar s3=cvGet2D(arr,(int)idx0_cl,(int)idx1_cl);
  241. CvScalar s4=cvGet2D(arr,(int)idx0_cl,(int)idx1_fl);
  242. double x = idx0 - idx0_fl;
  243. double y = idx1 - idx1_fl;
  244. res.val[0]= s1.val[0]*(1-x)*(1-y) + s2.val[0]*(1-x)*y + s3.val[0]*x*y + s4.val[0]*x*(1-y);
  245. res.val[1]= s1.val[1]*(1-x)*(1-y) + s2.val[1]*(1-x)*y + s3.val[1]*x*y + s4.val[1]*x*(1-y);
  246. res.val[2]= s1.val[2]*(1-x)*(1-y) + s2.val[2]*(1-x)*y + s3.val[2]*x*y + s4.val[2]*x*(1-y);
  247. res.val[3]= s1.val[3]*(1-x)*(1-y) + s2.val[3]*(1-x)*y + s3.val[3]*x*y + s4.val[3]*x*(1-y);
  248. }
  249. pair <double, double> mirror::getRadial(double x, double y, double cx, double cy, double k)
  250. {
  251. x = (x*scale.ST+shift.ST);
  252. y = (y*scale.ND+shift.ND);
  253. double resX = x+((x-cx)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
  254. double resY = y+((y-cy)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
  255. return make_pair(resX, resY);
  256. }
  257. double mirror::calc_shift(double x1, double x2, double cx, double k)
  258. {
  259. double thresh = 1;
  260. double x3 = x1+(x2-x1)*0.5;
  261. double res1 = x1+((x1-cx)*k*((x1-cx)*(x1-cx)));
  262. double res3 = x3+((x3-cx)*k*((x3-cx)*(x3-cx)));
  263. // std::cerr<<"x1: "<<x1<<" - "<<res1<<" x3: "<<x3<<" - "<<res3<<std::endl;
  264. if(res1>-thresh and res1 < thresh)
  265. return x1;
  266. if(res3<0)
  267. {
  268. return calc_shift(x3,x2,cx,k);
  269. }
  270. else
  271. {
  272. return calc_shift(x1,x3,cx,k);
  273. }
  274. }
// Apply barrel ("fisheye") distortion of strength `distortion` to `src`
// in place. `center` is the distortion centre in percent of the image
// size. Side effect: sets the member `shift`/`scale` pair consumed by
// getRadial() so the distorted content still fills the whole frame.
void mirror::fisheye(IplImage *src, double distortion, pair<int, int> center)
{
IplImage* dis = cvCreateImage(cvSize(src->width, src->height),src->depth,src->nChannels);
//cerr << "step1\n";
// Percentage centre -> pixel coordinates.
double centerX=(double)(center.ST*src->width)/100.0;
double centerY=(double)(center.ND*src->height)/100.0;
int width = src->width;
int height = src->height;
//cerr << "step2\n";
// Find how far each image edge moves under the distortion (left/right,
// top/bottom), then derive the scale that stretches the distorted
// content back over the full frame.
shift.ST = calc_shift(0,centerX-1,centerX,distortion);
double newcenterX = width-centerX;
pair <double, double> shift_2;
shift_2.ST = calc_shift(0,newcenterX-1,newcenterX,distortion);
//cerr << "step3\n";
shift.ND = calc_shift(0,centerY-1,centerY,distortion);
double newcenterY = height-centerY;
shift_2.ND = calc_shift(0,newcenterY-1,newcenterY,distortion);
scale.ST = (width-shift.ST-shift_2.ST)/width;
scale.ND = (height-shift.ND-shift_2.ND)/height;
//std::cerr<<xshift<<" "<<shift.ND<<" "<<xscale<<" "<<yscale<<std::endl;
//std::cerr<<cvGetSize(src).height<<std::endl;
//std::cerr<<cvGetSize(src).width<<std::endl;
//cerr << "step3\n";
// Backward-map every destination pixel through getRadial() and sample
// the source bilinearly. NOTE(review): per-pixel cvGet2D/cvSet2D is
// slow; tolerable only because the mirror images are small.
for(int j=0;j<cvGetSize(dis).height;j++)
{
for(int i=0;i<cvGetSize(dis).width;i++)
{
CvScalar s;
pair <double, double> xy;
xy = getRadial((double)i,(double)j,centerX,centerY,distortion);
// sampleImage takes (row, col), hence the ND/ST swap.
sampleImage(src,xy.ND,xy.ST,s);
cvSet2D(dis,j,i,s);
}
}
//cerr << "step4\n";
//cvReleaseImage(&src);
//src = cvCloneImage(dis);
//cvNamedWindow( "Source3", 1 );
//cvShowImage( "Source3", src);
//cvReleaseImage(&src);
cvCopy(dis, src);
//cvNamedWindow( "Source4", 1 );
//cvShowImage( "Source4", dis);
cvReleaseImage(&dis);
}
// One-time initialisation of the motion/face-detection pipeline:
// allocates every working image and lookup table sized from the first
// captured frame (`src` must already be valid, i.e. cam_init() ran),
// resets all state flags/counters, and validates the configured Haar
// cascades, falling back to rescue_cascades() when none load.
void camcapture::init_motionpics()
{
// Working images for the running-average motion detector.
difference = cvCreateImage ( motionpicsSize, src->depth, src->nChannels);
compare_pic = cvCreateImage ( cvSize(100,100), IPL_DEPTH_8U, 1);
resized = cvCreateImage ( motionpicsSize, src->depth, src->nChannels);
movingAverage = cvCreateImage( motionpicsSize, IPL_DEPTH_32F, src->nChannels);
dst = cvCreateImage( motionpicsSize, IPL_DEPTH_8U, 1 );
facegrey = cvCreateImage( cvSize(src->width, src->height), IPL_DEPTH_8U, 1 );
// Per-pixel boolean motion map and environment map, full camera resolution.
boolimage = new bool*[src->height];
env.envmap = new pixel*[src->height];
for (int i = 0; i < src->height; i++)
{
boolimage[i] = new bool[src->width];
}
for (int i = 0; i < src->height; i++)
{
env.envmap[i] = new pixel[src->width];
}
// Recognition / presence state-machine defaults.
recognitionInProgress = false;
presenceCounter = 0;
overdetect = false;
currentcascade = 0;
env.tabsize = motionpicsSize.width*motionpicsSize.height;
env.global_avg = 150;
env.checked = false;
fun.fun = 0.0;
fun.funcounter = 0;
fun.newfun = 0;
HRDWR.pid = getpid();
first = true;
sleep = false;
halted = false;
tmp_halted = false;
deactiveworking = false;
timer = 0;
prevmax = 0;
deactive_timer = 0;
// Loop delays (presumably milliseconds -- TODO confirm against caller).
delay = 100;
sleepdelay = 500;
fps = 25;
if (faceDetectEnabled)
{
// Drop cascades that failed to load; if nothing is left, try the
// emergency system-path search, and finally disable detection.
for (int i=0;i<faceCascade.size();i++)
{
if( !faceCascade[i] )
{
faceCascade.erase(faceCascade.begin()+i);
cerr << "Couldnt load Face detector\n";
i--;
}
}
if (faceCascade.size() == 0)
{
cerr << "Warning: Could not load specified cascades - trying to fuck this situation like a boss...\n";
rescue_cascades();
}
if (faceCascade.size() == 0)
{
faceDetectEnabled = false;
cerr << "Error: Beeing like a boss failed - could not find any cascade - disabling recognition\n";
}
// One detection-rectangle list per cascade.
faceAreas = new vector <CvRect> [faceCascade.size()];
if (faceRecognitionEnabled)
{
// Cost matrix for the hungarian-algorithm face tracking.
hungarianInput = new int *[maxFacesPerImg];
for (int i = 0; i < maxFacesPerImg; i++)
{
hungarianInput[i] = new int[maxFacesPerImg];
}
}
}
}
  392. void camcapture::rescue_cascades()
  393. {
  394. string emergency_cascades[4];
  395. emergency_cascades[0] = "haarcascade_frontalface_alt_tree.xml";
  396. emergency_cascades[1] = "haarcascade_frontalface_alt.xml";
  397. emergency_cascades[2] = "haarcascade_frontalface_alt2.xml";
  398. emergency_cascades[3] = "haarcascade_frontalface_default.xml";
  399. string possible_dir = "/usr/share/opencv/haarcascades/";
  400. for (int i = 0; i<4; i++)
  401. {
  402. faceCascade.push_back((CvHaarClassifierCascade*)cvLoad(&(possible_dir + emergency_cascades[i])[0], 0, 0, 0));
  403. }
  404. for (int i=0;i<faceCascade.size();i++)
  405. {
  406. if( !faceCascade[i] )
  407. {
  408. faceCascade.erase(faceCascade.begin()+i);
  409. i--;
  410. }
  411. }
  412. if (faceCascade.size() == 0)
  413. {
  414. cerr << "Couldnt load Face detector in " << possible_dir << "\n";
  415. possible_dir = "/usr/share/OpenCV/haarcascades/";
  416. for (int i = 0; i<4; i++)
  417. {
  418. faceCascade.push_back((CvHaarClassifierCascade*)cvLoad(&(possible_dir + emergency_cascades[i])[0], 0, 0, 0));
  419. }
  420. for (int i=0;i<faceCascade.size();i++)
  421. {
  422. if( !faceCascade[i] )
  423. {
  424. faceCascade.erase(faceCascade.begin()+i);
  425. i--;
  426. }
  427. }
  428. if (faceCascade.size() == 0)
  429. {
  430. cerr << "Couldnt load Face detector in " << possible_dir << "\n";
  431. possible_dir = "/usr/share/OpenCV/data/haarcascades/";
  432. for (int i = 0; i<4; i++)
  433. {
  434. faceCascade.push_back((CvHaarClassifierCascade*)cvLoad(&(possible_dir + emergency_cascades[i])[0], 0, 0, 0));
  435. }
  436. for (int i=0;i<faceCascade.size();i++)
  437. {
  438. if( !faceCascade[i] )
  439. {
  440. faceCascade.erase(faceCascade.begin()+i);
  441. i--;
  442. }
  443. }
  444. }
  445. if (faceCascade.size() == 0)
  446. cerr << "Couldnt load Face detector in " << possible_dir << "\n";
  447. }
  448. if (faceCascade.size()!=0)
  449. {
  450. cerr << "Success!\n" <<
  451. " _____________ \n" <<
  452. " _/ \\ \n" <<
  453. " / ____________ \n" <<
  454. "| ===( \\ \\ )\n" <<
  455. "| \\____\\/\\___/ \n" <<
  456. " \\_ ,_____ / \n" <<
  457. " \\___'_________/ \n" <<
  458. "\n" <<
  459. " LIKE A BOSS...\n\n";
  460. Configuration::getInstance()->setValue(".cam.system.face_cascades_dir", &possible_dir[0]);
  461. }
  462. }
  463. IplImage* camcapture::get_motionpics(double tolerance, IplImage *input)
  464. {
  465. if (first)
  466. {
  467. difference = cvCloneImage(input);
  468. temp = cvCloneImage(input);
  469. cvConvertScale(input, movingAverage, 1.0, 0.0);
  470. first = false;
  471. }
  472. else
  473. {
  474. cvRunningAvg(input, movingAverage, tolerance, NULL);
  475. }
  476. cvConvertScale(movingAverage,temp, 1.0, 0.0);
  477. cvAbsDiff(input,temp,difference);
  478. cvCvtColor( difference, dst, CV_RGB2GRAY );
  479. cvThreshold(dst, dst, 70, 255, CV_THRESH_BINARY);
  480. return dst;
  481. }
  482. bool** camcapture::img2bool(IplImage *input)
  483. {
  484. int step = input->widthStep;
  485. uchar *data = ( uchar* )input->imageData;
  486. //bool *output[motionpicsSize.height][motionpicsSize.width];
  487. motioncounter = 0;
  488. for( int i = 0; i < motionpicsSize.height; i++ )
  489. {
  490. for( int j = 0 ; j < motionpicsSize.width; j++ )
  491. {
  492. boolimage[i][j] = data[i*step + j];
  493. if (data[i*step + j] > 0)
  494. motioncounter++;
  495. }
  496. }
  497. return boolimage;
  498. }
  499. vector<CvRect> camcapture::detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade)
  500. {
  501. // Smallest face size.
  502. CvSize minFeatureSize = cvSize(inputImg->width*minfacesize, inputImg->height*minfacesize);
  503. // Only search for 1 face.
  504. int flags = CV_HAAR_DO_ROUGH_SEARCH;
  505. // How detailed should the search be.
  506. float search_scale_factor = 1.1f;
  507. CvMemStorage* storage;
  508. double t;
  509. CvSize size;
  510. int i, ms, nFaces;
  511. vector <CvRect> retvec;
  512. storage = cvCreateMemStorage(0);
  513. cvClearMemStorage( storage );
  514. retvec.clear();
  515. // Detect all the faces in the greyscale image.
  516. t = (double)cvGetTickCount();
  517. facerects = cvHaarDetectObjects( inputImg, cascade, storage,
  518. search_scale_factor, 3, flags, minFeatureSize);
  519. t = (double)cvGetTickCount() - t;
  520. ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) );
  521. nFaces = facerects->total;
  522. //printf("Face Detection took %d ms and found %d objects\n", ms, nFaces);
  523. //cerr << "Detected " << nFaces << " faces witch method: " << currentcascade+1 << "\n";
  524. // Get the first detected face (the biggest).
  525. if (nFaces > 0)
  526. {
  527. for (int j = 0; j<nFaces; j++)
  528. {
  529. retvec.push_back(*(CvRect*)cvGetSeqElem( facerects, j ));
  530. }
  531. retvec = mergePartialFaces(retvec, minMergeArea);
  532. }
  533. cvReleaseMemStorage( &storage );
  534. //cvReleaseHaarClassifierCascade( &cascade );
  535. return retvec; // Return the biggest face found, or (-1,-1,-1,-1).
  536. }
// Merge detection rectangles that overlap into single rectangles.
// Two rectangles are merged when their overlap extent exceeds
// minMatchPerc of either rectangle's width AND height; the first
// rectangle of the pair grows to the union and the second is erased.
// NOTE(review): the x-axis overlap tests below mix `.height` into x
// comparisons (e.g. input[j].x+input[j].height); that looks like it
// should be `.width` -- confirm before relying on exact merge behaviour.
vector <CvRect> camcapture::mergePartialFaces(vector<CvRect> input, double minMatchPerc)
{
//cerr << "start of merging...\n";
// Collect all index pairs (stored with smaller index first) whose
// rectangles overlap on both axes.
vector <pair<int, int> > matched;
matched.clear();
for (int i = 0; i<input.size(); i++)
{
for(int j = i;j<input.size(); j++)
{
if ((input[i].x < input[j].x && input[i].x+input[i].width > input[j].x ) ||
(input[j].x+input[j].height < input[i].x+input[i].height && input[j].x+input[j].height > input[i].x ) ||
(input[j].x < input[i].x && input[j].x+input[j].width > input[i].x ) ||
(input[i].x+input[i].height < input[j].x+input[j].height && input[i].x+input[i].height > input[j].x ) )
{
if ((input[i].y < input[j].y && input[i].y+input[i].height > input[j].y ) ||
(input[j].y+input[j].height < input[i].y+input[i].height && input[j].y+input[j].height > input[i].y ) ||
(input[j].y < input[i].y && input[j].y+input[j].height > input[i].y ) ||
(input[i].y+input[i].height < input[j].y+input[j].height && input[i].y+input[i].height > input[j].y ) )
{
if (i < j)
{
matched.push_back(make_pair(i, j));
}
else if (j < i)
matched.push_back(make_pair(j, i));
}
}
}
}
//if (input.size() > 1)
//{
// cerr << "CAUGHT\n";
// cerr << input[0].x << " " << input[0].width << "\n";
// cerr << input[1].x << " " << input[1].width << "\n";
//}
//cerr << "merging...\n";
// For each overlapping pair, compute the overlap extent (duplicated.first
// = horizontal, .second = vertical). When big enough relative to either
// rectangle, grow the first rectangle to the union and mark the second
// for removal.
vector <int> useless;
useless.clear();
for (int i = 0; i<matched.size(); i++)
{
pair <int, int> duplicated;
if (input[matched[i].ST].x < input[matched[i].ND].x)
duplicated.first = -input[matched[i].ND].x;
else
duplicated.first = -input[matched[i].ST].x;
if (input[matched[i].ST].x+input[matched[i].ST].width < input[matched[i].ND].x+input[matched[i].ND].width)
duplicated.first += input[matched[i].ST].x+input[matched[i].ST].width;
else
duplicated.first += input[matched[i].ND].x+input[matched[i].ND].width;
if (input[matched[i].ST].y < input[matched[i].ND].y)
duplicated.second = -input[matched[i].ST].y;
else
duplicated.second = -input[matched[i].ND].y;
if (input[matched[i].ST].y+input[matched[i].ST].height < input[matched[i].ND].y+input[matched[i].ND].height)
duplicated.second += input[matched[i].ST].y+input[matched[i].ST].height;
else
duplicated.second += input[matched[i].ND].y+input[matched[i].ND].height;
if ((duplicated.first > input[matched[i].ST].width*minMatchPerc || duplicated.first > input[matched[i].ND].width*minMatchPerc) &&
(duplicated.second > input[matched[i].ST].height*minMatchPerc || duplicated.second > input[matched[i].ND].height*minMatchPerc) )
{
if (input[matched[i].ST].x > input[matched[i].ND].x)
input[matched[i].ST].x = input[matched[i].ND].x;
if (input[matched[i].ST].x+input[matched[i].ST].width < input[matched[i].ND].x+input[matched[i].ND].width)
input[matched[i].ST].width = (input[matched[i].ND].x+input[matched[i].ND].width) - input[matched[i].ST].x;
if (input[matched[i].ST].y > input[matched[i].ND].y)
input[matched[i].ST].y = input[matched[i].ND].y;
if (input[matched[i].ST].y+input[matched[i].ST].height < input[matched[i].ND].y+input[matched[i].ND].height)
input[matched[i].ST].height = (input[matched[i].ND].y+input[matched[i].ND].height) - input[matched[i].ST].y;
useless.push_back(matched[i].ND);
//cerr << "merged\n";
}
//else
//cerr << "not merged\n";
}
// Erase the marked duplicates; the "- i" compensates for earlier
// erasures shifting later indices.
// NOTE(review): this assumes `useless` is sorted ascending and has no
// duplicates -- a rectangle appearing in several matched pairs could
// violate that; confirm.
for (int i = 0; i<useless.size(); i++)
{
input.erase(input.begin() + useless[i] - i);
//cerr << "ERASED\n";
}
//cerr << "end of merging\n";
return input;
}
// Cross-check detections from `size` cascades (input[0..size-1]): a face
// is accepted only when every cascade produced a rectangle matching the
// one from cascade 0 (per compareRect). The accepted rectangle is the
// coordinate-wise average of all matches, clamped to the facegrey frame.
// Note: matched rectangles are erased from input[j], so the vectors are
// consumed. The inner averaging loops reuse the name `i`, shadowing the
// outer index -- harmless here, but fragile to edit.
vector <CvRect> camcapture::generateAvgRect(vector<CvRect> input[], int size)
{
vector <CvRect> retvec;
retvec.clear();
vector <CvRect> tmp;
for (int i = 0; i < input[0].size(); i++)
{
//cerr << "firststep in " << i << " picture of vector 0\n";
// Gather, for the i-th face of cascade 0, one matching rectangle
// from each of the other cascades.
tmp.clear();
tmp.push_back(input[0][i]);
for (int j = 1; j<size;j++)
{
//cerr << "step2 in " << j << " second vector \n";
for (int k = 0 ;k < input[j].size();k++)
{
//cerr << "step3 in " << k << " picture - size of" << input[j].size() << "\n";
if (compareRect(input[0][i], input[j][k], minSizeMatch, minPosMatch))
{
//cerr << "MATCH!\n";
tmp.push_back(input[j][k]);
input[j].erase(input[j].begin()+k);
break;
}
//else
//cerr << "-";
}
}
// Accept only faces confirmed by every cascade.
if (tmp.size() == size)
{
//cerr << "MAPPED TRUE FACE\n";
retvec.push_back(input[0][i]);
// Average each coordinate over all cascades, clamping x/y at 0 and
// width/height so the rectangle stays inside the facegrey frame.
for (int i = 1; i < size ; i++)
retvec[retvec.size()-1].x += tmp[i].x;
retvec[retvec.size()-1].x /=size;
if (retvec[retvec.size()-1].x < 0)
retvec[retvec.size()-1].x = 0;
for (int i = 1; i < size ; i++)
retvec[retvec.size()-1].y += tmp[i].y;
retvec[retvec.size()-1].y /=size;
if (retvec[retvec.size()-1].y < 0)
retvec[retvec.size()-1].y = 0;
for (int i = 1; i < size ; i++)
retvec[retvec.size()-1].width += tmp[i].width;
retvec[retvec.size()-1].width /=size;
if (retvec[retvec.size()-1].x + retvec[retvec.size()-1].width > facegrey->width)
retvec[retvec.size()-1].width = facegrey->width-retvec[retvec.size()-1].x;
for (int i = 1; i < size ; i++)
retvec[retvec.size()-1].height += tmp[i].height;
retvec[retvec.size()-1].height /=size;
if (retvec[retvec.size()-1].y + retvec[retvec.size()-1].height > facegrey->height)
retvec[retvec.size()-1].height = facegrey->height-retvec[retvec.size()-1].y;
}
}
//cerr << "TOTAL TRUEFACE " << retvec.size() << "\n";
return retvec;
}
  675. bool camcapture::compareRect(CvRect a, CvRect b, double size_precision, double pos_precision)
  676. {
  677. if (a.width <= b.width*(1.0+size_precision) || b.width <= a.width*(1.0+size_precision))
  678. {
  679. if (a.height <= b.height*(1.0+size_precision) || b.height <= a.height*(1.0+size_precision))
  680. {
  681. if (abs((a.x + a.width/2) - (b.x + b.width/2)) < facegrey->width*pos_precision && abs((a.y + a.height/2) - (b.y + b.height/2)) < facegrey->height*pos_precision )
  682. return true;
  683. }
  684. }
  685. return false;
  686. }
// For each cropped face image, run the eye `cascade`; when exactly two
// eyes are found, their centre points are recorded (and drawn), and the
// face is rotated so the eyes (from the previous frame's rotvec entry)
// become horizontal.
// NOTE(review) -- several oddities to confirm before editing:
//  - the loop is gated on correctcascade[4]; if that flag is false the
//    whole function is a no-op;
//  - the rotation matrix is built from cos(y/d) and sin(x/d), which is
//    not a consistent rotation (one would expect cos/sin of the same
//    angle, e.g. atan2(y, x));
//  - input[i] is repointed at the rotated copy without releasing the
//    original image (leak), and `input` is passed by value, so the
//    caller never sees the rotated images;
//  - rotvec = newrotvec executes inside the loop, so later iterations
//    read a partially updated vector.
void camcapture::unrotate(vector<IplImage *> input, CvHaarClassifierCascade* cascade)
{
// Smallest face size.
CvSize minFeatureSize = cvSize(10, 10);
// Only search for 1 face.
int flags = CV_HAAR_DO_ROUGH_SEARCH;
// How detailed should the search be.
float search_scale_factor = 1.1f;
CvMemStorage* storage;
double t;
CvSeq* rects;
int ms, nEyes;
// Eye-centre pairs detected this frame (one entry per input image).
vector<pair <pair <int,int>, pair <int,int> > > newrotvec;
newrotvec.clear();
storage = cvCreateMemStorage(0);
cvClearMemStorage( storage );
for (int i=0;i<input.size() && correctcascade[4];i++)
{
// Detect all the faces in the greyscale image.
t = (double)cvGetTickCount();
rects = cvHaarDetectObjects( input[i], cascade, storage,
search_scale_factor, 2, flags, minFeatureSize);
t = (double)cvGetTickCount() - t;
ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) );
nEyes = rects->total;
//printf("Face Detection took %d ms and found %d objects\n", ms, nFaces);
//cerr << "Detected " << nEyes << " eyes\n";
if (nEyes == 2)
{
// Record both eye centres and draw their bounding boxes.
newrotvec.push_back(make_pair(make_pair((*(CvRect*)cvGetSeqElem( rects, 0 )).x +(*(CvRect*)cvGetSeqElem( rects, 0 )).width/2 , (*(CvRect*)cvGetSeqElem( rects, 0 )).y +(*(CvRect*)cvGetSeqElem( rects, 0 )).height/2), make_pair((*(CvRect*)cvGetSeqElem( rects, 1 )).x +(*(CvRect*)cvGetSeqElem( rects, 1 )).width/2 , (*(CvRect*)cvGetSeqElem( rects, 1 )).y +(*(CvRect*)cvGetSeqElem( rects, 1 )).height/2)));
cvRectangle(
input[i],
cvPoint((*(CvRect*)cvGetSeqElem( rects, 0 )).x, (*(CvRect*)cvGetSeqElem( rects, 0 )).y),
cvPoint((*(CvRect*)cvGetSeqElem( rects, 0 )).x + (*(CvRect*)cvGetSeqElem( rects, 0 )).width, (*(CvRect*)cvGetSeqElem( rects, 0 )).y + (*(CvRect*)cvGetSeqElem( rects, 0 )).height),
CV_RGB(255, 255, 255),
1, 8, 0
);
cvRectangle(
input[i],
cvPoint((*(CvRect*)cvGetSeqElem( rects, 1 )).x, (*(CvRect*)cvGetSeqElem( rects, 1 )).y),
cvPoint((*(CvRect*)cvGetSeqElem( rects, 1 )).x + (*(CvRect*)cvGetSeqElem( rects, 1 )).width, (*(CvRect*)cvGetSeqElem( rects, 1 )).y + (*(CvRect*)cvGetSeqElem( rects, 1 )).height),
CV_RGB(255, 255, 255),
1, 8, 0
);
}
// Fall back to the previous frame's eyes, or a (-1,-1) sentinel.
else if (i < rotvec.size())
newrotvec.push_back(rotvec[i]);
else
newrotvec.push_back(make_pair(make_pair(-1, -1), make_pair(-1, -1)));
if (newrotvec[i].ST.ST != -1)
{
//cerr << "ROTATING\n";
// Eye-to-eye displacement from the previous frame's entry.
double x = rotvec[i].ST.ST-rotvec[i].ND.ST;
if (x<0)
x = -x;
double y = rotvec[i].ST.ND-rotvec[i].ND.ND;
if (rotvec[i].ST.ST > rotvec[i].ND.ST)
y = -y;
double d = sqrt(x*x + y*y);
// Build a 2x3 affine warp centred on the image middle.
float m[6];
CvMat M = cvMat(2, 3, CV_32F, m);
int w = input[i]->width;
int h = input[i]->height;
m[0] = (float)( cos(y/d) );
m[1] = (float)( sin(x/d) );
m[3] = -m[1];
m[4] = m[0];
m[2] = w*0.5f;
m[5] = h*0.5f;
//cerr << m[0] << " " << m[1] << "\n";
// Make a spare image for the result
CvSize sizeRotated;
sizeRotated.width = cvRound(w);
sizeRotated.height = cvRound(h);
// Rotate
IplImage *imageRotated = cvCreateImage( sizeRotated,
input[i]->depth, input[i]->nChannels );
// Transform the image
cvGetQuadrangleSubPix( input[i], imageRotated, &M);
input[i] = imageRotated;
}
rotvec = newrotvec;
// Get the first detected face (the biggest).
}
cvReleaseMemStorage( &storage );
//cvReleaseHaarClassifierCascade( &cascade );
}
  774. vector<IplImage*> camcapture::cropImages(IplImage *input, vector<CvRect> region)
  775. {
  776. //faceimg.clear();
  777. vector <IplImage*> retvec;
  778. IplImage *imageCropped;
  779. IplImage *croptemp = cvCreateImage(cvSize(input->width, input->height), input->depth, input->nChannels);
  780. cvCopy(input, croptemp);
  781. // Set the desired region of interest.
  782. for (int i = 0; i < region.size(); i++)
  783. {
  784. cvSetImageROI(croptemp, region[i]);
  785. // Copy region of interest into a new iplImage and return it.
  786. imageCropped = cvCreateImage(cvSize(region[i].width, region[i].height), croptemp->depth, croptemp->nChannels);
  787. cvCopy(croptemp, imageCropped);
  788. retvec.push_back(cvCreateImage(cvSize(100,100), imageCropped->depth, imageCropped->nChannels));// Copy just the region.
  789. cvResize(imageCropped, retvec[i]);
  790. cvEqualizeHist(retvec[i], retvec[i]);
  791. cvReleaseImage(&imageCropped);
  792. }
  793. cvReleaseImage(&croptemp);
  794. return retvec;
  795. }
  796. int camcapture::searchFace(IplImage *input, Ptr<FaceRecognizer> inputModel, double precision)
  797. {
  798. if (ccap.faceRecognitionFirstRun)
  799. return -1;
  800. Mat converted(input);
  801. int predictedLabel = -1;
  802. inputModel->predict(converted, predictedLabel, precision);
  803. return predictedLabel;
  804. }
  805. /*
  806. hungarian input:
  807. R
  808. [1][2][3][4]
  809. L [2]
  810. [3]
  811. [4]
  812. */
  813. vector <PII> camcapture::trackFaces(vector<CvRect> inputL, vector<CvRect> inputR, int maxDist, double ignoreDist)
  814. {
  815. int newsize = maxFacesPerImg;
  816. bool toresize = false;
  817. if (inputL.size() > newsize)
  818. {
  819. toresize = true;
  820. newsize = inputL.size();
  821. }
  822. if (inputR.size() > newsize)
  823. {
  824. toresize = true;
  825. newsize = inputR.size();
  826. }
  827. if (toresize)
  828. {
  829. for (int i = 0; i < maxFacesPerImg; i++)
  830. {
  831. for (int j = 0; j < maxFacesPerImg; j++)
  832. {
  833. delete(&hungarianInput[i][j]);
  834. }
  835. delete(&hungarianInput[i]);
  836. }
  837. delete (&hungarianInput);
  838. maxFacesPerImg = newsize;
  839. hungarianInput = new int * [maxFacesPerImg];
  840. for (int i = 0; i < maxFacesPerImg; i++)
  841. {
  842. hungarianInput[i] = new int[maxFacesPerImg];
  843. }
  844. }
  845. vector <pair < int, int > > retvec (0);
  846. if (inputL.size() == 0 && inputR.size() == 0)
  847. return retvec;
  848. if (inputR.size() == 1 && inputL.size() == 1)
  849. {
  850. if (sqrt( ((inputR[0].x+inputR[0].width/2)-(inputL[0].x+inputL[0].width/2))*((inputR[0].x+inputR[0].width/2)-(inputL[0].x+inputL[0].width/2)) +
  851. ((inputR[0].y+inputR[0].height/2)-(inputL[0].y+inputL[0].height/2))*((inputR[0].y+inputR[0].height/2)-(inputL[0].y+inputL[0].height/2)) ) > (double)maxDist*ignoreDist)
  852. {
  853. retvec.push_back(make_pair (-1, 0));
  854. retvec.push_back(make_pair (0, -1));
  855. }
  856. else
  857. retvec.push_back(make_pair (0, 0));
  858. return retvec;
  859. }
  860. if (inputR.size() == 1 && inputL.size() == 0)
  861. {
  862. retvec.push_back(make_pair(-1, 0));
  863. return retvec;
  864. }
  865. if (inputR.size() == 0 && inputL.size() == 1)
  866. {
  867. retvec.push_back(make_pair(0, -1));
  868. return retvec;
  869. }
  870. cerr << "Beginning standard matching process:\nCounting distances...\n";
  871. for (int i = 0; i < inputL.size(); i++)
  872. {
  873. for (int j = 0; j < inputR.size(); j++)
  874. {
  875. cerr << "x A: " << (inputR[j].x+inputR[j].width/2) << " x B: " << (inputL[i].x+inputL[i].width/2) << "\n" <<
  876. "y A: " << (inputR[j].y+inputR[j].height/2) << " y B: " << (inputL[i].y+inputL[i].height/2) << "\n" <<
  877. "delta x = " << ((inputR[j].x+inputR[j].width/2)-(inputL[i].x+inputL[i].width/2))*((inputR[j].x+inputR[j].width/2)-(inputL[i].x+inputL[i].width/2)) << "\n" <<
  878. "delta y = " << ((inputR[j].y+inputR[j].height/2)-(inputL[i].y+inputL[i].height/2))*((inputR[j].y+inputR[j].height/2)-(inputL[i].y+inputL[i].height/2)) << "\n";
  879. hungarianInput[i][j] = sqrt( ((inputR[j].x+inputR[j].width/2)-(inputL[i].x+inputL[i].width/2))*((inputR[j].x+inputR[j].width/2)-(inputL[i].x+inputL[i].width/2)) +
  880. ((inputR[j].y+inputR[j].height/2)-(inputL[i].y+inputL[i].height/2))*((inputR[j].y+inputR[j].height/2)-(inputL[i].y+inputL[i].height/2)) );
  881. cerr << "original: " << hungarianInput[i][j] << "\n";
  882. if (hungarianInput[i][j] < maxDist*ignoreDist)
  883. {
  884. cerr << "equalizing...\n";
  885. cerr << hungarianInput[i][j] << " < " << maxDist*ignoreDist << "\n";
  886. hungarianInput[i][j] = (int)HRDWR.equalize(0, maxDist, hungarianInput[i][j], 1.5);
  887. }
  888. else //blacklist element:
  889. {
  890. cerr << hungarianInput[i][j] << " < " << maxDist*ignoreDist << "\n";
  891. hungarianInput[i][j] = maxDist;
  892. }
  893. cerr << "equalized: " << hungarianInput[i][j] << "\n";
  894. }
  895. }
  896. cerr << "dupa" << (int)HRDWR.equalize(0, maxDist, 3, 1.5) << "\n";
  897. for (int i = 0; i < inputR.size(); i++)
  898. {
  899. for (int j = 0; j < inputL.size(); j++)
  900. {
  901. cerr << hungarianInput[j][i] << " ";
  902. }
  903. cerr << "\n";
  904. }
  905. cerr << "DUN!\nbaking hungarian vector:\n";
  906. if (inputL.size() < inputR.size())
  907. {
  908. cerr << "switched to L < R...\nResizing to nxn matrix...";
  909. for (int i = inputL.size(); i < inputR.size(); i++)
  910. {
  911. for (int j = 0; j < inputR.size(); j++)
  912. {
  913. hungarianInput[i][j] = maxDist;
  914. }
  915. }
  916. cerr << "DUN!\nGetting return statement from hungarian method...";
  917. retvec = hungarian::hungarian(inputR.size(), inputR.size(), hungarianInput);
  918. cerr << "DUN!\nConverting to -1 notation...";
  919. // convert to correct notation:
  920. for (int i = 0; i < retvec.size(); i++)
  921. {
  922. if (retvec[i].ST >= inputL.size())
  923. retvec[i].ST = -1;
  924. }
  925. cerr << "DUN!\neliminating blacklisted...\n";
  926. }
  927. else
  928. {
  929. cerr << "switched to L >= R...\nResizing to nxn matrix...";
  930. for (int i = inputR.size(); i < inputL.size(); i++)
  931. {
  932. for (int j = 0; j < inputL.size(); j++)
  933. {
  934. hungarianInput[j][i] = maxDist;
  935. }
  936. }
  937. cerr << "DUN!\nGetting return statement from hungarian method...";
  938. retvec = hungarian::hungarian(inputL.size(), inputL.size(), hungarianInput);
  939. cerr << "DUN!\nConverting to -1 notation...";
  940. // convert to correct notation:
  941. for (int i = 0; i < retvec.size(); i++)
  942. {
  943. if (retvec[i].ND >= inputR.size())
  944. retvec[i].ND = -1;
  945. }
  946. cerr << "DUN!\neliminating blacklisted...\n";
  947. }
  948. for (int i = 0; i < inputR.size(); i++)
  949. {
  950. for (int j = 0; j < inputL.size(); j++)
  951. {
  952. cerr << hungarianInput[j][i] << " ";
  953. }
  954. cerr << "\n";
  955. }
  956. cerr << "input vector:\n";
  957. for (int i = 0; i < retvec.size(); i++)
  958. {
  959. cerr << retvec[i].ST << " -> " << retvec[i].ND << "\n";
  960. }
  961. //eliminate blacklisted
  962. cerr << "beginning...\n";
  963. for (int i = 0; i < retvec.size(); i++)
  964. {
  965. if (retvec[i].ST != -1 && retvec[i].ND != -1)
  966. {
  967. if (hungarianInput[retvec[i].ST][retvec[i].ND] == maxDist)
  968. {
  969. cerr << "erasing...";
  970. retvec.push_back(make_pair(retvec[i].ST, -1));
  971. retvec.push_back(make_pair(-1, retvec[i].ND));
  972. retvec.erase(retvec.begin()+i);
  973. i--;
  974. cerr << "DUN!\n";
  975. }
  976. }
  977. }
  978. return retvec;
  979. }
  980. /*
  981. face 1: [0][0][1][0][2]
  982. face 2: [1][1][0][1][0]
  983. face 3: [1]
  984. [0][0][0][0][0]
  985. [1][1][2][1][1]
  986. [-1]
  987. 0 1
  988. 1 2
  989. 2 -1
  990. 3 0
  991. 0 1
  992. 1 2
  993. 2 0
  994. -1 3
  995. */
  996. bool camcapture::addFaceData(vector<IplImage *> input, vector<PII> inputmatches, vector<vector <int> > *prevrecords)
  997. {
  998. //if there is no faces on image - clear everything
  999. if (inputmatches.size() == 0)
  1000. {
  1001. prevrecords[0].clear();
  1002. prevrecords[1].clear();
  1003. newFacesImgs.clear();
  1004. faceSamples.clear();
  1005. return 0;
  1006. }
  1007. //baking faces paths vectors
  1008. cerr << "Baking faces paths:\n";
  1009. cerr << inputmatches.size() << "\n";
  1010. for (int i; i < inputmatches.size(); i++)
  1011. cerr << inputmatches[i].ST << " " << inputmatches[i].ND << "\n";
  1012. cerr << "prevrec size: " << prevrecords[0].size() << "\n";
  1013. for (int i = 0; i<prevrecords[0].size(); i++)
  1014. {
  1015. // search existing faces
  1016. int searchfor = prevrecords[0][i][prevrecords[0][i].size()-1];
  1017. for (int j = 0; j < inputmatches.size(); j++)
  1018. {
  1019. if (inputmatches[j].ST == searchfor)
  1020. {
  1021. if (inputmatches[j].ND != -1)
  1022. {
  1023. //if success - erase old records and matching pair, and add new face position and face prediction index
  1024. if (prevrecords[0][i].size() == maxRecognitionBufferSize)
  1025. {
  1026. cerr << "erasing old records...";
  1027. prevrecords[0][i].erase(prevrecords[0][i].begin()+0);
  1028. prevrecords[1][i].erase(prevrecords[1][i].begin()+0);
  1029. cerr << "DUN!\n";
  1030. }
  1031. faceSamples[i]++;
  1032. prevrecords[0][i].push_back(inputmatches[j].ND);
  1033. prevrecords[1][i].push_back(searchFace(input[inputmatches[j].ND], facesModel, faceRecognisePrecision));
  1034. inputmatches.erase(inputmatches.begin() + j);
  1035. break;
  1036. }
  1037. else
  1038. {
  1039. //if failed - delete face record path and pair
  1040. for (int j = 0; j < newFacesImgs.size(); j++)
  1041. {
  1042. if (newFacesImgs[j].ST == i)
  1043. {
  1044. newFacesImgs.erase(newFacesImgs.begin()+j);
  1045. break;
  1046. }
  1047. }
  1048. faceSamples.erase(faceSamples.begin()+i);
  1049. prevrecords[0].erase(prevrecords[0].begin()+i);
  1050. prevrecords[1].erase(prevrecords[1].begin()+i);
  1051. inputmatches.erase(inputmatches.begin() + j);
  1052. i--;
  1053. break;
  1054. }
  1055. }
  1056. }
  1057. }
  1058. cerr << "result " << prevrecords[0].size() << "\n";
  1059. // if there are still untracked faces...
  1060. cerr << "Tracking for new faces\n";
  1061. if (inputmatches.size() != 0)
  1062. {
  1063. for (int i = 0; i < inputmatches.size(); i++)
  1064. {
  1065. if (inputmatches[i].ST == -1)
  1066. {
  1067. // add them if everything works correctly
  1068. cerr << "addin new record...";
  1069. vector <int> newface (0);
  1070. prevrecords[0].push_back(newface);
  1071. prevrecords[1].push_back(newface);
  1072. prevrecords[0][prevrecords[0].size()-1].push_back(inputmatches[i].ND);
  1073. prevrecords[1][prevrecords[1].size()-1].push_back(searchFace(input[inputmatches[i].ND], facesModel, faceRecognisePrecision));
  1074. faceSamples.push_back(1);
  1075. cerr << "DUN!:\n";
  1076. cerr << prevrecords[0][prevrecords[0].size()-1][prevrecords[0][prevrecords[0].size()-1].size() -1] << " " <<
  1077. prevrecords[1][prevrecords[1].size()-1][prevrecords[1][prevrecords[1].size()-1].size() -1] << " " <<
  1078. faceSamples[faceSamples.size()-1] << "\n";
  1079. }
  1080. else
  1081. {
  1082. cerr << "We just don't know what went wrong! There is face which should be tracked before...\n";
  1083. return 0;
  1084. }
  1085. }
  1086. }
  1087. for (int i = 0; i < prevrecords[0].size(); i++)
  1088. {
  1089. for (int j = 0; j < prevrecords[0][i].size(); j++)
  1090. cerr << "[" << prevrecords[0][i][j] << "]";
  1091. cerr << "\n";
  1092. }
  1093. cerr << "\n";
  1094. for (int i = 0; i < prevrecords[1].size(); i++)
  1095. {
  1096. for (int j = 0; j < prevrecords[1][i].size(); j++)
  1097. cerr << "[" << prevrecords[1][i][j] << "]";
  1098. cerr << "\n";
  1099. }
  1100. //get average recognision if there is at least minimal samplaes number
  1101. cerr << "recognition\n";
  1102. avgRecognitions.clear();
  1103. for (int i = 0; i < prevrecords[1].size(); i++)
  1104. {
  1105. cerr << "reco step 1...";
  1106. if (prevrecords[1][i].size() == maxRecognitionBufferSize)
  1107. {
  1108. vector <pair <int, int> > counter (0);
  1109. for (int j = 0; j < prevrecords[1][i].size(); j++)
  1110. {
  1111. bool present = false;
  1112. for (int k = 0; k < counter.size(); k++)
  1113. {
  1114. if (counter[k].ST == prevrecords[1][i][j])
  1115. {
  1116. counter[k].ND++;
  1117. present = true;
  1118. break;
  1119. }
  1120. }
  1121. if (!present)
  1122. counter.push_back(make_pair(prevrecords[1][i][j], 1));
  1123. }
  1124. pair <int, int> max = make_pair(0, 0);
  1125. for (int j = 0; j < counter.size(); j++)
  1126. {
  1127. if (counter[j].ND > max.ND)
  1128. max = counter[j];
  1129. }
  1130. if (max.ND > maxRecognitionBufferSize/2)
  1131. avgRecognitions.push_back(max.ST);
  1132. else
  1133. avgRecognitions.push_back(-2);
  1134. }
  1135. else
  1136. avgRecognitions.push_back(-2);
  1137. cerr << "DUN!\nreco step 2...";
  1138. if ((prevrecords[1][i][prevrecords[1][i].size()-1] == -1 && prevrecords[1][i].size() < maxRecognitionBufferSize) || avgRecognitions[i] == -1)
  1139. {
  1140. if (prevrecords[1][i][prevrecords[1][i].size()-1] == -1 && prevrecords[1][i].size() < maxRecognitionBufferSize + newFaceOverdetectSkipSamples && prevrecords[1][i].size() > newFaceOverdetectSkipSamples)
  1141. overdetect = true;
  1142. bool present = false;
  1143. for (int j = 0; j < newFacesImgs.size(); j++)
  1144. {
  1145. if (newFacesImgs[j].ST == i)
  1146. {
  1147. newFacesImgs[j].ND.push_back(*cvCloneImage(input[prevrecords[0][i][prevrecords[0][i].size()-1]]));
  1148. present = true;
  1149. }
  1150. }
  1151. if (!present)
  1152. {
  1153. vector <IplImage> newface (0);
  1154. newFacesImgs.push_back(make_pair(i, newface));
  1155. newFacesImgs[newFacesImgs.size()-1].ND.push_back(*cvCloneImage(input[prevrecords[0][i][prevrecords[0][i].size()-1]]));
  1156. }
  1157. }
  1158. cerr << "DUN!\n";
  1159. }
  1160. cerr << "\n";
  1161. for (int i = 0; i < avgRecognitions.size(); i++)
  1162. cerr << "[" << avgRecognitions[i] << "] (" << faceSamples[i] << ")\n";
  1163. bool toreload = false;
  1164. // chceck if face save record is necessary:
  1165. cerr << "savin\'\n";
  1166. for (int i = 0; i < faceSamples.size(); i++)
  1167. {
  1168. if (faceSamples[i] >= faceImageDropDelay)
  1169. {
  1170. if (avgRecognitions[i] != -2 && avgRecognitions[i] != -1 && avgRecognitions[i] == prevrecords[1][i][prevrecords[1][i].size()-1])
  1171. {
  1172. faceSamples[i] = 0;
  1173. facesBank.push_back(Mat (input[prevrecords[0][i][prevrecords[0][i].size()-1]]));
  1174. facesBankIndex.push_back(avgRecognitions[i]);
  1175. facesBankQuantities[avgRecognitions[i]]++;
  1176. stringstream ss, ss2, ss3, ss4;
  1177. ss << avgRecognitions[i];
  1178. bool holeExists = false;
  1179. if (avgRecognitions[i] < ccap.freeFaceImgs.size())
  1180. if (ccap.freeFaceImgs[avgRecognitions[i]].size() > 0)
  1181. holeExists = true;
  1182. if (!holeExists)
  1183. {
  1184. ss2 << facesBankQuantities[avgRecognitions[i]];
  1185. ss3 << (facesBankQuantities[facesBankIndex[facesBankIndex.size()-1]] - 1);
  1186. HRDWR.set_file(ccap.facesBankPath + ss.str() + "/" + "size", ss2.str());
  1187. }
  1188. else
  1189. {
  1190. ss3 << *new int (ccap.freeFaceImgs[avgRecognitions[i]][ccap.freeFaceImgs[avgRecognitions[i]].size() - 1]);
  1191. ccap.freeFaceImgs[avgRecognitions[i]].erase(ccap.freeFaceImgs[avgRecognitions[i]].begin() + ccap.freeFaceImgs[avgRecognitions[i]].size() - 1);
  1192. }
  1193. cerr << "No new face detected, face " << ss.str() << " bank size changed to " << ss2.str() << "\n";
  1194. ss4 << facesBankIndex[facesBankIndex.size()-1];
  1195. const string path = string (ccap.facesBankPath + ss4.str() + "/" + ss3.str() + ".jpg");
  1196. imwrite(path, norm_0_255(ccap.facesBank[facesBank.size()-1].reshape(1, ccap.facesBank[facesBank.size()-1].rows)));
  1197. cerr << "File :" << path << " saved\n";
  1198. toreload = true;
  1199. }
  1200. }
  1201. }
  1202. //search for filled new face bases:
  1203. cerr << "savin\'2\n";
  1204. for (int i = 0; i < newFacesImgs.size(); i++)
  1205. {
  1206. if (newFacesImgs[i].ND.size() == maxRecognitionBufferSize + newFaceOverdetectSkipSamples)
  1207. {
  1208. cerr << "Detected new face:\n";
  1209. stringstream ss, ss2, ss3;
  1210. ss << facesBankQuantities.size();
  1211. if ( -1 == mkdir ( &(ccap.facesBankPath + ss.str() + "/")[0], S_IRUSR | S_IWUSR | S_IXUSR ) and errno != EEXIST )
  1212. {
  1213. cerr << "Couldn't create faces dir - disabling recognition";
  1214. ccap.faceRecognitionEnabled = false;
  1215. }
  1216. ss3 << maxRecognitionBufferSize;
  1217. HRDWR.set_file(ccap.facesBankPath + ss.str() + "/" + "size", ss3.str());
  1218. facesBankQuantities.push_back(maxRecognitionBufferSize);
  1219. ss2 << facesBankQuantities.size();
  1220. HRDWR.set_file(ccap.facesBankPath + "size", ss2.str());
  1221. cerr << "sizeof new face set to " << ss3.str() << ", new dir name: " << ss.str() << ". Bank size changed to: " << ss2.str() << "\n";
  1222. for (int j = newFaceOverdetectSkipSamples; j < maxRecognitionBufferSize + newFaceOverdetectSkipSamples; j++)
  1223. {
  1224. facesBank.push_back(Mat (&newFacesImgs[i].ND[j]));
  1225. facesBankIndex.push_back(facesBankQuantities.size()-1);
  1226. stringstream ss, ss2;
  1227. ss << (j-newFaceOverdetectSkipSamples);
  1228. ss2 << (facesBankQuantities.size()-1);
  1229. const string path = string (ccap.facesBankPath + ss2.str() + "/" + ss.str() + ".