
/OpenCV-2.4.0/samples/cpp/stitching_detailed.cpp

https://bitbucket.org/adamspurgin/finalproject
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//M*/

#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;
void printUsage()
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than usual mode,\n"
        "      but output image will have lower resolution.\n"
        "  --try_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb)\n"
        "      Type of features used for images matching. The default is surf.\n"
        "  --match_conf <float>\n"
        "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Threshold for the confidence that two images are from the same panorama.\n"
        "      The default is 1.0.\n"
        "  --ba (reproj|ray)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine the respective parameter and '_' means don't\n"
        "      refine it, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of the selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save matches graph represented in DOT language to <file_name> file.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for compositing step. Use -1 for original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}
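
// Example invocation (illustrative file names only):
//   stitching_detailed img1.jpg img2.jpg img3.jpg --warp spherical --blend multiband --output pano.jpg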
// Default command line args
vector<string> img_names;
bool preview = false;
bool try_gpu = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
string features = "surf";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
float blend_strength = 5;
string result_name = "result.jpg";

int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--preview")
        {
            preview = true;
        }
        else if (string(argv[i]) == "--try_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_gpu = true;
            else
            {
                cout << "Bad --try_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--work_megapix")
        {
            work_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--seam_megapix")
        {
            seam_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--compose_megapix")
        {
            compose_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--features")
        {
            features = argv[i + 1];
            if (features == "orb")
                match_conf = 0.3f;
            i++;
        }
        else if (string(argv[i]) == "--match_conf")
        {
            match_conf = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--conf_thresh")
        {
            conf_thresh = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            ba_cost_func = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--ba_refine_mask")
        {
            ba_refine_mask = argv[i + 1];
            if (ba_refine_mask.size() != 5)
            {
                cout << "Incorrect refinement mask length.\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--wave_correct")
        {
            if (string(argv[i + 1]) == "no")
                do_wave_correct = false;
            else if (string(argv[i + 1]) == "horiz")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_HORIZ;
            }
            else if (string(argv[i + 1]) == "vert")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_VERT;
            }
            else
            {
                cout << "Bad --wave_correct flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--save_graph")
        {
            save_graph = true;
            save_graph_to = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            warp_type = string(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--expos_comp")
        {
            if (string(argv[i + 1]) == "no")
                expos_comp_type = ExposureCompensator::NO;
            else if (string(argv[i + 1]) == "gain")
                expos_comp_type = ExposureCompensator::GAIN;
            else if (string(argv[i + 1]) == "gain_blocks")
                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
            else
            {
                cout << "Bad exposure compensation method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no" ||
                string(argv[i + 1]) == "voronoi" ||
                string(argv[i + 1]) == "gc_color" ||
                string(argv[i + 1]) == "gc_colorgrad")
                seam_find_type = argv[i + 1];
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend_strength")
        {
            blend_strength = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
            img_names.push_back(argv[i]);
    }
    if (preview)
    {
        compose_megapix = 0.6;
    }
    return 0;
}
int main(int argc, char* argv[])
{
    int64 app_start_time = getTickCount();
    cv::setBreakOnError(true);

    int retval = parseCmdArgs(argc, argv);
    if (retval)
        return retval;

    // Check if we have enough images
    int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

    LOGLN("Finding features...");
    int64 t = getTickCount();

    Ptr<FeaturesFinder> finder;
    if (features == "surf")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features << "'.\n";
        return -1;
    }

    Mat full_img, img;
    // Note: this vector shadows the global 'features' string from this point on
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;
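
    // For each input image: load it, downscale to work resolution for feature
    // detection, and keep a seam-resolution copy for later seam estimation.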
    for (int i = 0; i < num_images; ++i)
    {
        full_img = imread(img_names[i]);
        full_img_sizes[i] = full_img.size();
        if (full_img.empty())
        {
            LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
    t = getTickCount();
    vector<MatchesInfo> pairwise_matches;
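    // BestOf2NearestMatcher matches features between every image pair;
    // match_conf is the 2-nearest-neighbour ratio threshold used to filter matches.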
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    }

    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<string> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }
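
    // Estimate initial camera parameters (focal lengths and rotations) from the
    // pairwise homographies before refining them with bundle adjustment.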
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
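    // Map the <fx><skew><ppx><aspect><ppy> characters of --ba_refine_mask onto the
    // corresponding entries of the camera matrix K that bundle adjustment may refine.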
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);

    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }
    sort(focals.begin(), focals.end());
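    // The median focal length is used as the scale (radius) of the warping surface.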
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
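
    // Wave correction removes the residual "wavy" tilt in the estimated camera
    // rotations so the panorama comes out straightened along the chosen axis.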
    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }

    LOGLN("Warping images (auxiliary)... ");
    t = getTickCount();

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);

    // Prepare image masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<WarperCreator> warper_creator;
#ifdef HAVE_OPENCV_GPU
    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
    }
    else
#endif
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
    }

    if (warper_creator.empty())
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }
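
    // Create the warper at seam-estimation resolution: the median focal length
    // scaled by seam_work_aspect (seam scale relative to work scale).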
    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0,0) *= swa; K(0,2) *= swa;
        K(1,1) *= swa; K(1,2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
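
    // The exposure compensator estimates per-image (or per-block) gains from the
    // overlapping regions of the warped images; the seam finder then searches for
    // optimal seams inside those overlaps.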
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }

    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
    t = getTickCount();

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
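
    // Compositing: reload each image at full (or compose) resolution, re-warp it with
    // intrinsics rescaled to that resolution, apply exposure compensation and the
    // seam masks, and feed the result to the blender.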
    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        LOGLN("Compositing image #" << indices[img_idx]+1);

        // Read image and resize it if necessary
        full_img = imread(img_names[img_idx]);
        if (!is_compose_scale_set)
        {
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;

            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();
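
        // Bring the seam mask (found at seam resolution) up to compose resolution:
        // dilate it, resize to the warped image size, and intersect with the warped mask.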
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
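            // blend_strength sets the blending band width as a percentage of the panorama
            // diagonal; the number of pyramid bands (or the feather sharpness) is derived from it.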
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}