
/samples/cpp/stitching_detailed.cpp

https://github.com/NITESH21/opencv
C++ | 767 lines | 632 code | 72 blank | 63 comment
Possible License(s): LGPL-3.0, BSD-3-Clause
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//M*/
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;
static void printUsage()
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than the usual mode,\n"
        "      but the output image will have lower resolution.\n"
        "  --try_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for the image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb)\n"
        "      Type of features used for image matching. The default is surf.\n"
        "  --match_conf <float>\n"
        "      Confidence for the feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Confidence threshold used to decide whether two images are from the same panorama.\n"
        "      The default is 1.0.\n"
        "  --ba (reproj|ray)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine the respective parameter and '_' means don't\n"
        "      refine it, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of the selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save the matches graph represented in DOT language to <file_name>.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for the seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad|dp_color|dp_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for the compositing step. Use -1 for the original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from the [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}
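// Example invocation (illustrative only; the image names are placeholders):
//   stitching_detailed img1.jpg img2.jpg img3.jpg --features orb --warp cylindrical --blend multiband --output pano.jpg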
// Default command line args
vector<String> img_names;
bool preview = false;
bool try_gpu = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
string features_type = "surf";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
float blend_strength = 5;
string result_name = "result.jpg";
static int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--preview")
        {
            preview = true;
        }
        else if (string(argv[i]) == "--try_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_gpu = true;
            else
            {
                cout << "Bad --try_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--work_megapix")
        {
            work_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--seam_megapix")
        {
            seam_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--compose_megapix")
        {
            compose_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--features")
        {
            features_type = argv[i + 1];
            if (features_type == "orb")
                match_conf = 0.3f;
            i++;
        }
        else if (string(argv[i]) == "--match_conf")
        {
            match_conf = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--conf_thresh")
        {
            conf_thresh = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            ba_cost_func = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--ba_refine_mask")
        {
            ba_refine_mask = argv[i + 1];
            if (ba_refine_mask.size() != 5)
            {
                cout << "Incorrect refinement mask length.\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--wave_correct")
        {
            if (string(argv[i + 1]) == "no")
                do_wave_correct = false;
            else if (string(argv[i + 1]) == "horiz")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_HORIZ;
            }
            else if (string(argv[i + 1]) == "vert")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_VERT;
            }
            else
            {
                cout << "Bad --wave_correct flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--save_graph")
        {
            save_graph = true;
            save_graph_to = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            warp_type = string(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--expos_comp")
        {
            if (string(argv[i + 1]) == "no")
                expos_comp_type = ExposureCompensator::NO;
            else if (string(argv[i + 1]) == "gain")
                expos_comp_type = ExposureCompensator::GAIN;
            else if (string(argv[i + 1]) == "gain_blocks")
                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
            else
            {
                cout << "Bad exposure compensation method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no" ||
                string(argv[i + 1]) == "voronoi" ||
                string(argv[i + 1]) == "gc_color" ||
                string(argv[i + 1]) == "gc_colorgrad" ||
                string(argv[i + 1]) == "dp_color" ||
                string(argv[i + 1]) == "dp_colorgrad")
                seam_find_type = argv[i + 1];
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend_strength")
        {
            blend_strength = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
            img_names.push_back(argv[i]);
    }
    if (preview)
    {
        compose_megapix = 0.6;
    }
    return 0;
}
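// The stages below follow the detail::* stitching pipeline: feature detection,
// pairwise matching, selection of the largest confidently matched component,
// homography-based camera estimation, bundle adjustment, optional wave correction,
// warping, exposure compensation, seam finding, and blending.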
int main(int argc, char* argv[])
{
#if ENABLE_LOG
    int64 app_start_time = getTickCount();
#endif

    cv::setBreakOnError(true);

    int retval = parseCmdArgs(argc, argv);
    if (retval)
        return retval;

    // Check if we have enough images
    int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;
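    // Three working resolutions are used: work_scale for feature detection and
    // camera estimation, seam_scale for seam finding, and compose_scale for the
    // final compositing (set via --work_megapix, --seam_megapix and --compose_megapix).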
    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    Ptr<FeaturesFinder> finder;
    if (features_type == "surf")
    {
#ifdef HAVE_OPENCV_NONFREE
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features_type == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;

    for (int i = 0; i < num_images; ++i)
    {
        full_img = imread(img_names[i]);
        full_img_sizes[i] = full_img.size();

        if (full_img.empty())
        {
            LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    }

    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<String> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
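    // The refinement mask follows the --ba_refine_mask format <fx><skew><ppx><aspect><ppy>,
    // mapped onto the 3x3 camera matrix positions below.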
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);

    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
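    // The median focal length found above is used as the scale of the warping surface.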
    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }

    LOGLN("Warping images (auxiliary)... ");
#if ENABLE_LOG
    t = getTickCount();
#endif

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);

    // Prepare images masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<WarperCreator> warper_creator;
#ifdef HAVE_OPENCV_GPUWARPING
    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
    }
    else
#endif
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
    }

    if (warper_creator.empty())
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }

    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
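    // This auxiliary warp runs at seam resolution, while the cameras were estimated
    // at work resolution, so the intrinsics are scaled by seam_work_aspect below.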
    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0,0) *= swa; K(0,2) *= swa;
        K(1,1) *= swa; K(1,2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    else if (seam_find_type == "dp_color")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
    else if (seam_find_type == "dp_colorgrad")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }

    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();
    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;

    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        LOGLN("Compositing image #" << indices[img_idx]+1);

        // Read image and resize it if necessary
        full_img = imread(img_names[img_idx]);
        if (!is_compose_scale_set)
        {
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;

            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
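            // blend_width is blend_strength percent of the square root of the
            // panorama area; if it falls below one pixel, blending is disabled.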
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}
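// For comparison, the high-level cv::Stitcher class wraps this whole pipeline.
// A minimal sketch (illustrative only; assumes the stitching module's Stitcher
// API is available and uses placeholder image names):
//
//     #include "opencv2/stitching.hpp"
//
//     vector<Mat> imgs;
//     imgs.push_back(imread("img1.jpg"));
//     imgs.push_back(imread("img2.jpg"));
//     Mat pano;
//     Stitcher stitcher = Stitcher::createDefault(false /*try_use_gpu*/);
//     if (stitcher.stitch(imgs, pano) == Stitcher::OK)
//         imwrite("result.jpg", pano);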