
/toolkits/computer_vision/stitching_detailed.cpp

https://github.com/michaelkook/GraphLab-2
/*
 * Copyright (c) 2009 Carnegie Mellon University.
 * All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 *
 * For more about this software visit:
 *
 *     http://www.graphlab.ml.cmu.edu
 *
 */

#include <fstream>
#include <string>

#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"

using namespace std;
using namespace cv;
using namespace cv::detail;

void printUsage()
{
    cout <<
        "Rotation model images stitcher.\n\n"
        "stitching_detailed img1 img2 [...imgN] [flags]\n\n"
        "Flags:\n"
        "  --preview\n"
        "      Run stitching in the preview mode. Works faster than usual mode,\n"
        "      but output image will have lower resolution.\n"
        "  --try_gpu (yes|no)\n"
        "      Try to use GPU. The default value is 'no'. All default values\n"
        "      are for CPU mode.\n"
        "\nMotion Estimation Flags:\n"
        "  --work_megapix <float>\n"
        "      Resolution for image registration step. The default is 0.6 Mpx.\n"
        "  --features (surf|orb)\n"
        "      Type of features used for image matching. The default is surf.\n"
        "  --match_conf <float>\n"
        "      Confidence for feature matching step. The default is 0.65 for surf and 0.3 for orb.\n"
        "  --conf_thresh <float>\n"
        "      Confidence threshold for deciding whether two images are from the same panorama.\n"
        "      The default is 1.0.\n"
        "  --ba (reproj|ray)\n"
        "      Bundle adjustment cost function. The default is ray.\n"
        "  --ba_refine_mask (mask)\n"
        "      Set refinement mask for bundle adjustment. It looks like 'x_xxx',\n"
        "      where 'x' means refine respective parameter and '_' means don't\n"
        "      refine one, and has the following format:\n"
        "      <fx><skew><ppx><aspect><ppy>. The default mask is 'xxxxx'. If bundle\n"
        "      adjustment doesn't support estimation of selected parameter then\n"
        "      the respective flag is ignored.\n"
        "  --wave_correct (no|horiz|vert)\n"
        "      Perform wave effect correction. The default is 'horiz'.\n"
        "  --save_graph <file_name>\n"
        "      Save matches graph represented in DOT language to <file_name> file.\n"
        "      Labels description: Nm is number of matches, Ni is number of inliers,\n"
        "      C is confidence.\n"
        "\nCompositing Flags:\n"
        "  --warp (plane|cylindrical|spherical|fisheye|stereographic|compressedPlaneA2B1|compressedPlaneA1.5B1|compressedPlanePortraitA2B1|compressedPlanePortraitA1.5B1|paniniA2B1|paniniA1.5B1|paniniPortraitA2B1|paniniPortraitA1.5B1|mercator|transverseMercator)\n"
        "      Warp surface type. The default is 'spherical'.\n"
        "  --seam_megapix <float>\n"
        "      Resolution for seam estimation step. The default is 0.1 Mpx.\n"
        "  --seam (no|voronoi|gc_color|gc_colorgrad)\n"
        "      Seam estimation method. The default is 'gc_color'.\n"
        "  --compose_megapix <float>\n"
        "      Resolution for compositing step. Use -1 for original resolution.\n"
        "      The default is -1.\n"
        "  --expos_comp (no|gain|gain_blocks)\n"
        "      Exposure compensation method. The default is 'gain_blocks'.\n"
        "  --blend (no|feather|multiband)\n"
        "      Blending method. The default is 'multiband'.\n"
        "  --blend_strength <float>\n"
        "      Blending strength from [0,100] range. The default is 5.\n"
        "  --output <result_img>\n"
        "      The default is 'result.jpg'.\n";
}
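
// Example invocation (file names are hypothetical; every flag is parsed in
// parseCmdArgs() below):
//
//   ./stitching_detailed img1.jpg img2.jpg img3.jpg --features orb \
//       --warp cylindrical --seam gc_color --blend multiband --output pano.jpg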

// Default command line args
vector<string> img_names;
bool preview = false;
bool try_gpu = false;
double work_megapix = 0.6;
double seam_megapix = 0.1;
double compose_megapix = -1;
float conf_thresh = 1.f;
string features = "surf";
string ba_cost_func = "ray";
string ba_refine_mask = "xxxxx";
bool do_wave_correct = true;
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ;
bool save_graph = false;
std::string save_graph_to;
string warp_type = "spherical";
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
float match_conf = 0.3f;
string seam_find_type = "gc_color";
int blend_type = Blender::MULTI_BAND;
float blend_strength = 5;
string result_name = "result.jpg";

int parseCmdArgs(int argc, char** argv)
{
    if (argc == 1)
    {
        printUsage();
        return -1;
    }
    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--help" || string(argv[i]) == "/?")
        {
            printUsage();
            return -1;
        }
        else if (string(argv[i]) == "--preview")
        {
            preview = true;
        }
        else if (string(argv[i]) == "--try_gpu")
        {
            if (string(argv[i + 1]) == "no")
                try_gpu = false;
            else if (string(argv[i + 1]) == "yes")
                try_gpu = true;
            else
            {
                cout << "Bad --try_gpu flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--work_megapix")
        {
            work_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--seam_megapix")
        {
            seam_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--compose_megapix")
        {
            compose_megapix = atof(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--result")
        {
            result_name = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--features")
        {
            features = argv[i + 1];
            if (features == "orb")
                match_conf = 0.3f;
            i++;
        }
        else if (string(argv[i]) == "--match_conf")
        {
            match_conf = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--conf_thresh")
        {
            conf_thresh = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--ba")
        {
            ba_cost_func = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--ba_refine_mask")
        {
            ba_refine_mask = argv[i + 1];
            if (ba_refine_mask.size() != 5)
            {
                cout << "Incorrect refinement mask length.\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--wave_correct")
        {
            if (string(argv[i + 1]) == "no")
                do_wave_correct = false;
            else if (string(argv[i + 1]) == "horiz")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_HORIZ;
            }
            else if (string(argv[i + 1]) == "vert")
            {
                do_wave_correct = true;
                wave_correct = detail::WAVE_CORRECT_VERT;
            }
            else
            {
                cout << "Bad --wave_correct flag value\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--save_graph")
        {
            save_graph = true;
            save_graph_to = argv[i + 1];
            i++;
        }
        else if (string(argv[i]) == "--warp")
        {
            warp_type = string(argv[i + 1]);
            i++;
        }
        else if (string(argv[i]) == "--expos_comp")
        {
            if (string(argv[i + 1]) == "no")
                expos_comp_type = ExposureCompensator::NO;
            else if (string(argv[i + 1]) == "gain")
                expos_comp_type = ExposureCompensator::GAIN;
            else if (string(argv[i + 1]) == "gain_blocks")
                expos_comp_type = ExposureCompensator::GAIN_BLOCKS;
            else
            {
                cout << "Bad exposure compensation method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--seam")
        {
            if (string(argv[i + 1]) == "no" ||
                string(argv[i + 1]) == "voronoi" ||
                string(argv[i + 1]) == "gc_color" ||
                string(argv[i + 1]) == "gc_colorgrad")
                seam_find_type = argv[i + 1];
            else
            {
                cout << "Bad seam finding method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend")
        {
            if (string(argv[i + 1]) == "no")
                blend_type = Blender::NO;
            else if (string(argv[i + 1]) == "feather")
                blend_type = Blender::FEATHER;
            else if (string(argv[i + 1]) == "multiband")
                blend_type = Blender::MULTI_BAND;
            else
            {
                cout << "Bad blending method\n";
                return -1;
            }
            i++;
        }
        else if (string(argv[i]) == "--blend_strength")
        {
            blend_strength = static_cast<float>(atof(argv[i + 1]));
            i++;
        }
        else if (string(argv[i]) == "--output")
        {
            result_name = argv[i + 1];
            i++;
        }
        else
            img_names.push_back(argv[i]);
    }
    if (preview)
    {
        compose_megapix = 0.6;
    }
    return 0;
}
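
// main() runs the detail-level stitching pipeline step by step: find features,
// match image pairs, keep the largest confident component, estimate camera
// parameters (homography-based init, bundle adjustment, optional wave
// correction), warp the images, compensate exposure, find seams, and finally
// compose and blend the panorama at full (or --compose_megapix) resolution.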
int main(int argc, char* argv[])
{
    int64 app_start_time = getTickCount();
    cv::setBreakOnError(true);

    int retval = parseCmdArgs(argc, argv);
    if (retval)
        return retval;

    // Check if we have enough images
    int num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    double work_scale = 1, seam_scale = 1, compose_scale = 1;
    bool is_work_scale_set = false, is_seam_scale_set = false, is_compose_scale_set = false;

    LOGLN("Finding features...");
    int64 t = getTickCount();
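
    // Pick the feature finder. SURF is the default, with a GPU variant when
    // OpenCV was built with the gpu module and a CUDA device is available;
    // ORB is the alternative selected via --features orb. Note that the
    // vector<ImageFeatures> named 'features' declared a few lines below
    // shadows this global option string for the remainder of main().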
    Ptr<FeaturesFinder> finder;
    if (features == "surf")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            finder = new SurfFeaturesFinderGpu();
        else
#endif
            finder = new SurfFeaturesFinder();
    }
    else if (features == "orb")
    {
        finder = new OrbFeaturesFinder();
    }
    else
    {
        cout << "Unknown 2D features type: '" << features << "'.\n";
        return -1;
    }

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;

    for (int i = 0; i < num_images; ++i)
    {
        full_img = imread(img_names[i]);
        full_img_sizes[i] = full_img.size();
        if (full_img.empty())
        {
            LOGLN("Can't open image " << img_names[i]);
            return -1;
        }
        if (work_megapix < 0)
        {
            img = full_img;
            work_scale = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale, work_scale);
        }
        if (!is_seam_scale_set)
        {
            seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
            seam_work_aspect = seam_scale / work_scale;
            is_seam_scale_set = true;
        }

        (*finder)(img, features[i]);
        features[i].img_idx = i;
        LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
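
    // BestOf2NearestMatcher matches descriptors with a 2-nearest-neighbour
    // ratio test controlled by match_conf and then verifies each image pair
    // geometrically (RANSAC homography), producing inlier counts and a
    // per-pair confidence that is used for filtering below.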
    LOG("Pairwise matching");
    t = getTickCount();
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Check if we should save matches graph
    if (save_graph)
    {
        LOGLN("Saving matches graph...");
        ofstream f(save_graph_to.c_str());
        f << matchesGraphAsString(img_names, pairwise_matches, conf_thresh);
        // for (int i=0; i!=pairwise_matches.size(); ++i)
        //     f << pairwise_matches[i].src_img_idx << " " << pairwise_matches[i].dst_img_idx << "\n";
    }
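
    // Keep only the largest subset of images whose pairwise confidences exceed
    // conf_thresh; anything else is treated as not belonging to this panorama,
    // and all per-image arrays are re-indexed accordingly below.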
    // Leave only images we are sure are from the same panorama
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<string> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Check if we still have enough images
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        LOGLN("Need more images");
        return -1;
    }

    LOG("Homography-based init\n");
    t = getTickCount();
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);
    LOGLN("Homography-based init, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        LOGLN("Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K());
    }

    LOG("Bundle Adjustment\n");
    t = getTickCount();
    Ptr<detail::BundleAdjusterBase> adjuster;
    if (ba_cost_func == "reproj") adjuster = new detail::BundleAdjusterReproj();
    else if (ba_cost_func == "ray") adjuster = new detail::BundleAdjusterRay();
    else
    {
        cout << "Unknown bundle adjustment cost function: '" << ba_cost_func << "'.\n";
        return -1;
    }
    adjuster->setConfThresh(conf_thresh);
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
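    // The characters of --ba_refine_mask map onto entries of the 3x3 intrinsic
    // matrix K: <fx> -> (0,0), <skew> -> (0,1), <ppx> -> (0,2),
    // <aspect> -> (1,1), <ppy> -> (1,2). An 'x' marks the parameter as free
    // during bundle adjustment.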
    if (ba_refine_mask[0] == 'x') refine_mask(0,0) = 1;
    if (ba_refine_mask[1] == 'x') refine_mask(0,1) = 1;
    if (ba_refine_mask[2] == 'x') refine_mask(0,2) = 1;
    if (ba_refine_mask[3] == 'x') refine_mask(1,1) = 1;
    if (ba_refine_mask[4] == 'x') refine_mask(1,2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);
    LOGLN("Bundle Adjustment, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    // Find median focal length
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        LOGLN("Camera #" << indices[i]+1 << ":\n" << cameras[i].K());
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;
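
    // Wave correction estimates a common "up" direction from all camera
    // rotations and removes the wavy horizon that rotation-only registration
    // tends to produce (horizontal or vertical panoramas).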
    if (do_wave_correct)
    {
        vector<Mat> rmats;
        for (size_t i = 0; i < cameras.size(); ++i)
            rmats.push_back(cameras[i].R);
        waveCorrect(rmats, wave_correct);
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R = rmats[i];
    }
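
    // Warp the downscaled (seam_megapix) images and their masks onto the
    // chosen surface. Intrinsics are rescaled by seam_work_aspect because the
    // cameras were estimated at work_megapix resolution; these warped images
    // are used only for exposure compensation and seam finding.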
  469. LOGLN("Warping images (auxiliary)... ");
  470. t = getTickCount();
  471. vector<Point> corners(num_images);
  472. vector<Mat> masks_warped(num_images);
  473. vector<Mat> images_warped(num_images);
  474. vector<Size> sizes(num_images);
  475. vector<Mat> masks(num_images);
  476. // Preapre images masks
  477. for (int i = 0; i < num_images; ++i)
  478. {
  479. masks[i].create(images[i].size(), CV_8U);
  480. masks[i].setTo(Scalar::all(255));
  481. }
  482. // Warp images and their masks
  483. Ptr<WarperCreator> warper_creator;
  484. #ifdef HAVE_OPENCV_GPU
  485. if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
  486. {
  487. if (warp_type == "plane") warper_creator = new cv::PlaneWarperGpu();
  488. else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarperGpu();
  489. else if (warp_type == "spherical") warper_creator = new cv::SphericalWarperGpu();
  490. }
  491. else
  492. #endif
  493. {
  494. if (warp_type == "plane") warper_creator = new cv::PlaneWarper();
  495. else if (warp_type == "cylindrical") warper_creator = new cv::CylindricalWarper();
  496. else if (warp_type == "spherical") warper_creator = new cv::SphericalWarper();
  497. else if (warp_type == "fisheye") warper_creator = new cv::FisheyeWarper();
  498. else if (warp_type == "stereographic") warper_creator = new cv::StereographicWarper();
  499. else if (warp_type == "compressedPlaneA2B1") warper_creator = new cv::CompressedRectilinearWarper(2, 1);
  500. else if (warp_type == "compressedPlaneA1.5B1") warper_creator = new cv::CompressedRectilinearWarper(1.5, 1);
  501. else if (warp_type == "compressedPlanePortraitA2B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(2, 1);
  502. else if (warp_type == "compressedPlanePortraitA1.5B1") warper_creator = new cv::CompressedRectilinearPortraitWarper(1.5, 1);
  503. else if (warp_type == "paniniA2B1") warper_creator = new cv::PaniniWarper(2, 1);
  504. else if (warp_type == "paniniA1.5B1") warper_creator = new cv::PaniniWarper(1.5, 1);
  505. else if (warp_type == "paniniPortraitA2B1") warper_creator = new cv::PaniniPortraitWarper(2, 1);
  506. else if (warp_type == "paniniPortraitA1.5B1") warper_creator = new cv::PaniniPortraitWarper(1.5, 1);
  507. else if (warp_type == "mercator") warper_creator = new cv::MercatorWarper();
  508. else if (warp_type == "transverseMercator") warper_creator = new cv::TransverseMercatorWarper();
  509. }
  510. if (warper_creator.empty())
  511. {
  512. cout << "Can't create the following warper '" << warp_type << "'\n";
  513. return 1;
  514. }
  515. Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));
  516. for (int i = 0; i < num_images; ++i)
  517. {
  518. Mat_<float> K;
  519. cameras[i].K().convertTo(K, CV_32F);
  520. float swa = (float)seam_work_aspect;
  521. K(0,0) *= swa; K(0,2) *= swa;
  522. K(1,1) *= swa; K(1,2) *= swa;
  523. corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
  524. sizes[i] = images_warped[i].size();
  525. warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
  526. }
  527. vector<Mat> images_warped_f(num_images);
  528. for (int i = 0; i < num_images; ++i)
  529. images_warped[i].convertTo(images_warped_f[i], CV_32F);
  530. LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    Ptr<SeamFinder> seam_finder;
    if (seam_find_type == "no")
        seam_finder = new detail::NoSeamFinder();
    else if (seam_find_type == "voronoi")
        seam_finder = new detail::VoronoiSeamFinder();
    else if (seam_find_type == "gc_color")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    }
    else if (seam_find_type == "gc_colorgrad")
    {
#ifdef HAVE_OPENCV_GPU
        if (try_gpu && gpu::getCudaEnabledDeviceCount() > 0)
            seam_finder = new detail::GraphCutSeamFinderGpu(GraphCutSeamFinderBase::COST_COLOR_GRAD);
        else
#endif
            seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR_GRAD);
    }
    if (seam_finder.empty())
    {
        cout << "Can't create the following seam finder '" << seam_find_type << "'\n";
        return 1;
    }

    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();
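
    // Compositing re-reads each image at full (or compose_megapix) resolution,
    // rescales the camera intrinsics accordingly, warps the image, applies the
    // exposure gains, restricts its mask to the low-resolution seam (dilated,
    // resized and intersected with the warped mask), and feeds it to the
    // blender.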
  567. LOGLN("Compositing...");
  568. t = getTickCount();
  569. Mat img_warped, img_warped_s;
  570. Mat dilated_mask, seam_mask, mask, mask_warped;
  571. Ptr<Blender> blender;
  572. double compose_seam_aspect = 1;
  573. double compose_work_aspect = 1;
  574. for (int img_idx = 0; img_idx < num_images; ++img_idx)
  575. {
  576. LOGLN("Compositing image #" << indices[img_idx]+1);
  577. // Read image and resize it if necessary
  578. full_img = imread(img_names[img_idx]);
  579. if (!is_compose_scale_set)
  580. {
  581. if (compose_megapix > 0)
  582. compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
  583. is_compose_scale_set = true;
  584. // Compute relative scales
  585. compose_seam_aspect = compose_scale / seam_scale;
  586. compose_work_aspect = compose_scale / work_scale;
  587. // Update warped image scale
  588. warped_image_scale *= static_cast<float>(compose_work_aspect);
  589. warper = warper_creator->create(warped_image_scale);
  590. // Update corners and sizes
  591. for (int i = 0; i < num_images; ++i)
  592. {
  593. // Update intrinsics
  594. cameras[i].focal *= compose_work_aspect;
  595. cameras[i].ppx *= compose_work_aspect;
  596. cameras[i].ppy *= compose_work_aspect;
  597. // Update corner and size
  598. Size sz = full_img_sizes[i];
  599. if (std::abs(compose_scale - 1) > 1e-1)
  600. {
  601. sz.width = cvRound(full_img_sizes[i].width * compose_scale);
  602. sz.height = cvRound(full_img_sizes[i].height * compose_scale);
  603. }
  604. Mat K;
  605. cameras[i].K().convertTo(K, CV_32F);
  606. Rect roi = warper->warpRoi(sz, K, cameras[i].R);
  607. corners[i] = roi.tl();
  608. sizes[i] = roi.size();
  609. }
  610. }
  611. if (abs(compose_scale - 1) > 1e-1)
  612. resize(full_img, img, Size(), compose_scale, compose_scale);
  613. else
  614. img = full_img;
  615. full_img.release();
  616. Size img_size = img.size();
  617. Mat K;
  618. cameras[img_idx].K().convertTo(K, CV_32F);
  619. // Warp the current image
  620. warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
  621. // Warp the current image mask
  622. mask.create(img_size, CV_8U);
  623. mask.setTo(Scalar::all(255));
  624. warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
  625. // Compensate exposure
  626. compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);
  627. img_warped.convertTo(img_warped_s, CV_16S);
  628. img_warped.release();
  629. img.release();
  630. mask.release();
  631. dilate(masks_warped[img_idx], dilated_mask, Mat());
  632. resize(dilated_mask, seam_mask, mask_warped.size());
  633. mask_warped = seam_mask & mask_warped;
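
        // The blender is created lazily on the first iteration, once the final
        // panorama extent is known. blend_strength sets the blend width as a
        // percentage of the output diagonal; for the multi-band blender the
        // number of pyramid bands grows with log2 of that width, and the
        // feather blender's sharpness is its reciprocal.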
        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else if (blend_type == Blender::MULTI_BAND)
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width)/log(2.)) - 1.));
                LOGLN("Multi-band blender, number of bands: " << mb->numBands());
            }
            else if (blend_type == Blender::FEATHER)
            {
                FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
                fb->setSharpness(1.f/blend_width);
                LOGLN("Feather blender, sharpness: " << fb->sharpness());
            }
            blender->prepare(corners, sizes);
        }

        // Blend the current image
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender->blend(result, result_mask);

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    imwrite(result_name, result);

    LOGLN("Finished, total time: " << ((getTickCount() - app_start_time) / getTickFrequency()) << " sec");
    return 0;
}