
/samples/cpp/stitching_detailed.cpp

https://github.com/smart-make/opencv
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//
//M*/
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;
static void printUsage()
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than usual mode,\n"
        "      but output image will have lower resolution.\n"
        "  --try_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb)\n"
        "      Type of features used for images matching. The default is surf.\n"
        "  --match_conf <float>\n"
        "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Confidence threshold for deciding that two images are from the same panorama.\n"
        "      The default is 1.0.\n"
        "  --ba (reproj|ray)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine respective parameter and '_' means don't\n"
        "      refine one, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save matches graph represented in DOT language to <file_name> file.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad|dp_color|dp_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for compositing step. Use -1 for original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}
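
// Example invocation (illustrative only; the image file names are placeholders).
// Any argument that is not a recognized flag is treated as an input image name:
//
//     ./stitching_detailed img1.jpg img2.jpg img3.jpg --features orb \
//         --warp cylindrical --blend multiband --output pano.jpg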
// Default command line args
vector<String> img_names;
bool preview = false;
bool try_gpu = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
string features_type = "surf";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
float blend_strength = 5;
string result_name = "result.jpg";
static int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--preview")
        {
            preview = true;
        }
        else if (string(argv[i]) == "--try_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_gpu = true;
            else
            {
                cout << "Bad --try_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--work_megapix")
        {
            work_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--seam_megapix")
        {
            seam_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--compose_megapix")
        {
            compose_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--features")
        {
            features_type = argv[i + 1];
            if (features_type == "orb")
                match_conf = 0.3f;
            i++;
        }
        else if (string(argv[i]) == "--match_conf")
        {
            match_conf = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--conf_thresh")
        {
            conf_thresh = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            ba_cost_func = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--ba_refine_mask")
        {
            ba_refine_mask = argv[i + 1];
            if (ba_refine_mask.size() != 5)
            {
                cout << "Incorrect refinement mask length.\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--wave_correct")
        {
            if (string(argv[i + 1]) == "no")
                do_wave_correct = false;
            else if (string(argv[i + 1]) == "horiz")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_HORIZ;
            }
            else if (string(argv[i + 1]) == "vert")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_VERT;
            }
            else
            {
                cout << "Bad --wave_correct flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--save_graph")
        {
            save_graph = true;
            save_graph_to = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            warp_type = string(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--expos_comp")
        {
            if (string(argv[i + 1]) == "no")
                expos_comp_type = ExposureCompensator::NO;
            else if (string(argv[i + 1]) == "gain")
                expos_comp_type = ExposureCompensator::GAIN;
            else if (string(argv[i + 1]) == "gain_blocks")
                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
            else
            {
                cout << "Bad exposure compensation method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no" ||
                string(argv[i + 1]) == "voronoi" ||
                string(argv[i + 1]) == "gc_color" ||
                string(argv[i + 1]) == "gc_colorgrad" ||
                string(argv[i + 1]) == "dp_color" ||
                string(argv[i + 1]) == "dp_colorgrad")
                seam_find_type = argv[i + 1];
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend_strength")
        {
            blend_strength = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
            img_names.push_back(argv[i]);
    }
    if (preview)
    {
        compose_megapix = 0.6;
    }
    return 0;
}
int main(int argc, char* argv[])
{
#if ENABLE_LOG
    int64 app_start_time = getTickCount();
#endif
    cv::setBreakOnError(true);

    int retval = parseCmdArgs(argc, argv);
    if (retval)
        return retval;

    // Check if have enough images
    int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    Ptr<FeaturesFinder> finder;
    if (features_type == "surf")
    {
#ifdef HAVE_OPENCV_NONFREE
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features_type == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features_type << "'.\n";
        return -1;
    }

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;
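
    // Each input is handled at up to three resolutions: work scale (work_megapix) for
    // feature detection and camera estimation, seam scale (seam_megapix) for seam
    // finding, and compose scale (compose_megapix) for the final panorama.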
    for (int i = 0; i < num_images; ++i)
    {
        full_img = imread(img_names[i]);
        full_img_sizes[i] = full_img.size();

        if (full_img.empty())
        {
            LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
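
    // Match features between all image pairs; match_conf is the confidence threshold
    // of the two-nearest-neighbour ratio test used to accept a putative match.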
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();

    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
    }

    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<String> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }
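
    // Estimate an initial rotation and focal length for every camera from the pairwise
    // homographies; bundle adjustment refines these estimates below.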
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    if (!estimator(features, pairwise_matches, cameras))
    {
        cout << "Homography estimation failed.\n";
        return -1;
    }

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
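
    // Translate the 'x'/'_' mask string into the 3x3 refinement mask used by the
    // adjuster: fx -> (0,0), skew -> (0,1), ppx -> (0,2), aspect -> (1,1), ppy -> (1,2).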
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    if (!(*adjuster)(features, pairwise_matches, cameras))
    {
        cout << "Camera parameters adjusting failed.\n";
        return -1;
    }

    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
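
    // Wave correction jointly adjusts the camera rotations to remove the "wavy" look of
    // the panorama, keeping it roughly horizontal or vertical depending on the flag.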
    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }

    LOGLN("Warping images (auxiliary)... ");
#if ENABLE_LOG
    t = getTickCount();
#endif

    vector<Point> corners(num_images);
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);
    // Prepare image masks
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<WarperCreator> warper_creator;
#ifdef HAVE_OPENCV_GPUWARPING
    if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
    }
    else
#endif
    {
        if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
        else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
        else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
        else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
        else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
        else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
        else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
        else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
        else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
        else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
        else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
        else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
        else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
        else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
        else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
    }

    if (warper_creator.empty())
    {
        cout << "Can't create the following warper '" << warp_type << "'\n";
        return 1;
    }

    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
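
    // Warp the seam-scale images and masks. The intrinsics were estimated at work
    // scale, so focal lengths and principal points are rescaled by seam_work_aspect.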
    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        float swa = (float)seam_work_aspect;
        K(0,0) *= swa; K(0,2) *= swa;
        K(1,1) *= swa; K(1,2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
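
    // Estimate per-image (or per-block) gain corrections from the warped overlaps so
    // that exposure differences between shots are reduced before blending.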
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);
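
    // Find blending seams in the overlap regions: the graph-cut finders optimize a
    // color (or color+gradient) cost, while 'voronoi' splits the overlap by distance.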
    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    else if (seam_find_type == "dp_color")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR);
    else if (seam_find_type == "dp_colorgrad")
        seam_finder = new detail::DpSeamFinder(DpSeamFinder::COLOR_GRAD);
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }
    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;
    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;

    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        LOGLN("Compositing image #" << indices[img_idx]+1);

        // Read image and resize it if necessary
        full_img = imread(img_names[img_idx]);
        if (!is_compose_scale_set)
        {
            if (compose_megapix > 0)
                compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale;
            compose_work_aspect = compose_scale / work_scale;

            // Update warped image scale
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Update corners and sizes
            for (int i = 0; i < num_images; ++i)
            {
                // Update intrinsics
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Compensate exposure
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();
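
        // Keep only the pixels selected by the seam finder: the seam-scale mask is
        // dilated, resized to compose scale, and intersected with the warp mask.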
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;
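
        // Create the blender once, when composing the first image: the blend width is
        // derived from the panorama size and blend_strength; multi-band uses roughly
        // log2(blend_width) bands, feather uses a sharpness of 1/blend_width.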
        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}