/ocr/ocrservice/jni/opticalflow/optical_flow.cpp

http://eyes-free.googlecode.com/ · C++ · 652 lines · 419 code · 147 blank · 86 comment · 58 complexity · c182ffcc712f91333171ce6790039dd1 MD5

/*
 * Copyright 2011, Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Author: Andrew Harp

#include "utils.h"
#include "time_log.h"
#include "image.h"
#include "math.h"
#include "optical_flow.h"
#include "feature_detector.h"

namespace flow {

OpticalFlow::OpticalFlow(const int32 frame_width,
                         const int32 frame_height,
                         const int32 downsample_factor) :
    downsample_factor_(downsample_factor),
    original_size_(frame_width, frame_height),
    working_size_(frame_width / downsample_factor_,
                  frame_height / downsample_factor_),
    avg_g_x_(0.0f),
    avg_g_y_(0.0f),
    first_frame_index_(0),
    num_frames_(0),
    last_time_fresh_features_(0),
    frame_added_(false),
    features_computed_(false),
    flow_computed_(false) {
  for (int i = 0; i < NUM_FRAMES; ++i) {
    frame_pairs_[i].init(0);
  }

  interest_map_ = new Image<bool>(working_size_);
  feature_scratch_ = new Image<uint8>(working_size_);
  frame1_ = new ImageData(working_size_);
  frame2_ = new ImageData(working_size_);
}

OpticalFlow::~OpticalFlow() {
  // Delete all image storage.
  SAFE_DELETE(feature_scratch_);
  SAFE_DELETE(interest_map_);
  SAFE_DELETE(frame1_);
  SAFE_DELETE(frame2_);
}
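
// The last NUM_FRAMES frame-to-frame changes are kept in frame_pairs_, used
// as a circular queue: num_frames_ counts the valid entries and
// first_frame_index_ marks the oldest one. geNthIndexFromStart() and
// geNthIndexFromEnd() (declared in optical_flow.h) are assumed, from their
// usage below, to wrap an offset from either end of that queue back into
// the underlying array.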

void OpticalFlow::nextFrame(const uint8* const new_frame,
                            const clock_t timestamp) {
  frame_added_ = false;
  features_computed_ = false;
  flow_computed_ = false;

  // Move the current frame-change index up.
  ++num_frames_;

  // If we've got too many, push up the start of the queue.
  if (num_frames_ > NUM_FRAMES) {
    first_frame_index_ = geNthIndexFromStart(1);
    --num_frames_;
  }

  FramePair* const curr_change = frame_pairs_ + geNthIndexFromEnd(0);
  curr_change->init(timestamp);

  interest_map_->clear(false);

  // Cache out data from the last frame.
  // Don't run on frame 0 (no point) or frame 1 (because the old frame will
  // already be in frame1_).
  if (num_frames_ > 2) {
    swap(&frame1_, &frame2_);
    timeLog("Copied data from last run");
  }

  frame2_->init(new_frame, original_size_.width, timestamp, downsample_factor_);

  // Special case for the first frame: make sure the image ends up in
  // frame1_ so that feature detection can be done on it if desired.
  // TODO(andrewharp): Make it so that feature detection is always done
  // on the last frame added.
  if (num_frames_ == 1) {
    swap(&frame1_, &frame2_);
  }

  frame_added_ = true;
}

void OpticalFlow::computeFlow() {
  CHECK(frame_added_ && features_computed_ && !flow_computed_,
        "Optical Flow function called out of order!");

  if (num_frames_ < 2) {
    LOGV("Frame index was %d, skipping computation.", num_frames_);
    return;
  }

  FramePair* const curr_change = &frame_pairs_[geNthIndexFromEnd(0)];
  findCorrespondences(curr_change);

  flow_computed_ = true;
}
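
// Expected call order per frame, as enforced by the CHECK guards in
// nextFrame()/computeFeatures()/computeFlow() (sketch; names as used in
// this file):
//
//   flow.nextFrame(pixels, timestamp);
//   flow.computeFeatures(true /* cached_ok */);
//   flow.computeFlow();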

void OpticalFlow::findFeatures(const FramePair& prev_change,
                               FramePair* const curr_change) {
  int32 number_of_tmp_features = 0;

  // Copy features from second frame of last pass to temp features of this
  // pass.
  number_of_tmp_features = copyFeatures(prev_change, tmp_features_);

  const float32 buffer = 30.0f;
  number_of_tmp_features += seedFeatures(*frame1_->image_,
      FEATURE_GRID_WIDTH, FEATURE_GRID_HEIGHT,
      buffer, buffer,
      (float32) working_size_.width - buffer,
      (float32) working_size_.height - buffer, FEATURE_DEFAULT,
      tmp_features_ + number_of_tmp_features);
  timeLog("Seeded features.");

  const int32 max_num_fast = MAX_TEMP_FEATURES - number_of_tmp_features;
  number_of_tmp_features +=
      findFastFeatures(*frame1_->image_, max_num_fast,
                       tmp_features_ + number_of_tmp_features,
                       feature_scratch_);

  if (number_of_tmp_features >= MAX_TEMP_FEATURES) {
    LOGW("Hit cap of %d for temporary features!", number_of_tmp_features);
  }

  // Score them...
  scoreFeatures(*frame1_->spatial_x_[0], *frame1_->spatial_y_[0],
                number_of_tmp_features, tmp_features_);
  timeLog("Scored features");

  // Now pare it down a bit.
  curr_change->number_of_features_ = sortAndSelect(
      number_of_tmp_features,
      MAX_FEATURES,
      *interest_map_,
      tmp_features_,
      curr_change->frame1_features_,
      feature_scratch_);
  timeLog("Sorted and selected features");

  LOGV("Picked %d (%d max) final features out of %d potential.",
       curr_change->number_of_features_, MAX_FEATURES, number_of_tmp_features);

  last_time_fresh_features_ = curr_change->end_time;
}

int32 OpticalFlow::copyFeatures(const FramePair& prev_change,
                                Point2D* const new_features) {
  int32 number_of_features = 0;

  // Caching values from last pass, just copy and compact.
  for (int32 i = 0; i < prev_change.number_of_features_; ++i) {
    if (prev_change.optical_flow_found_feature_[i]) {
      new_features[number_of_features] =
          prev_change.frame2_features_[i];

      new_features[number_of_features].score =
          prev_change.frame1_features_[i].score;

      ++number_of_features;
    }
  }

  timeLog("Copied features");
  return number_of_features;
}
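
// Feature reuse policy for computeFeatures() below: cached features from
// the previous pair are kept only while at least MIN_FEATURES of them
// survived tracking and no more than REGEN_FEATURES_MS has elapsed since
// the last fresh detection. Otherwise a full re-detect (findFeatures) runs.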

void OpticalFlow::computeFeatures(const bool cached_ok) {
  CHECK(frame_added_ && !features_computed_ && !flow_computed_,
        "Optical Flow function called out of order!");

  const FramePair& prev_change = frame_pairs_[geNthIndexFromEnd(1)];
  FramePair* const curr_change = &frame_pairs_[geNthIndexFromEnd(0)];

  const int32 num_found_features = prev_change.countFoundFeatures();
  const clock_t ms_since_last_refresh =
      (curr_change->end_time - last_time_fresh_features_);

  if (cached_ok &&
      num_found_features >= MIN_FEATURES &&
      ms_since_last_refresh <= REGEN_FEATURES_MS) {
    // Reuse the found features from the last frame if we can.
    curr_change->number_of_features_ =
        copyFeatures(prev_change, curr_change->frame1_features_);
  } else {
    // Only find new features to track if we've lost too many since the last
    // time, or it's time to regenerate anyway.
    LOGV("Not enough features (%d/%d), or it's been too long (%ld), "
         "finding more.",
         num_found_features, MIN_FEATURES, ms_since_last_refresh);

    findFeatures(prev_change, curr_change);
  }

  features_computed_ = true;
}
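
// Output layout for getFeatures(): each feature occupies FEATURE_STEP
// consecutive floats in out_data, packed as
//   [x1, y1, found, x2, y2, score, type]
// with coordinates scaled back to the original (pre-downsample) frame size.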

int32 OpticalFlow::getFeatures(const bool only_found,
                               float32* const out_data) const {
  CHECK(frame_added_ && features_computed_,
        "Optical Flow function called out of order!");

  int32 curr_feature = 0;
  const FramePair& change = frame_pairs_[geNthIndexFromEnd(0)];

  for (int32 i = 0; i < change.number_of_features_; ++i) {
    if (!only_found || change.optical_flow_found_feature_[i]) {
      const int base = curr_feature * FEATURE_STEP;
      out_data[base + 0] = change.frame1_features_[i].x * downsample_factor_;
      out_data[base + 1] = change.frame1_features_[i].y * downsample_factor_;
      out_data[base + 2] = change.optical_flow_found_feature_[i];
      out_data[base + 3] = change.frame2_features_[i].x * downsample_factor_;
      out_data[base + 4] = change.frame2_features_[i].y * downsample_factor_;
      out_data[base + 5] = change.frame1_features_[i].score;
      out_data[base + 6] = change.frame1_features_[i].type;
      ++curr_feature;
    }
  }

  LOGV("Got %d features.", curr_feature);
  return curr_feature;
}

// Finds the correspondences for all the points in the current pair of frames.
// Stores the results in the given FramePair.
void OpticalFlow::findCorrespondences(FramePair* const frame_pair) const {
  // Features aren't found until they're found.
  memset(frame_pair->optical_flow_found_feature_, false,
         sizeof(*frame_pair->optical_flow_found_feature_) * MAX_FEATURES);
  timeLog("Cleared old found features");

  int32 num_features_found = 0;

  // For every feature...
  for (int32 i_feat = 0; i_feat < frame_pair->number_of_features_; ++i_feat) {
    Point2D* feature1 = frame_pair->frame1_features_ + i_feat;
    Point2D* feature2 = frame_pair->frame2_features_ + i_feat;

    if (findFlowAtPoint(feature1->x, feature1->y,
                        &feature2->x, &feature2->y)) {
      frame_pair->optical_flow_found_feature_[i_feat] = true;
      ++num_features_found;
    }
  }

  timeLog("Found correspondences");

  LOGV("Found %d of %d feature correspondences",
       num_features_found, frame_pair->number_of_features_);
}
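
// Per pyramid level, findFlowAtPoint() below solves the standard
// Lucas-Kanade system over a (2 * WINDOW_SIZE + 1)^2 window around the
// feature:
//
//   G = sum over window of [ I_x^2    I_x*I_y ]
//                          [ I_x*I_y  I_y^2   ]
//   b = sum over window of (I - J) * [ I_x, I_y ]
//   n = G^-1 * b
//
// where I is the previous frame, J the next, and I_x, I_y the spatial
// gradients of I. The displacement guess g accumulates n each iteration
// and is doubled when stepping to the next finer pyramid level.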

// An implementation of the Pyramidal Lucas-Kanade Optical Flow algorithm.
// See http://robots.stanford.edu/cs223b04/algo_tracking.pdf for details.
bool OpticalFlow::findFlowAtPoint(const float32 u_x, const float32 u_y,
                                  float32* final_x, float32* final_y) const {
  const float32 threshold_squared = square(THRESHOLD);

  // Initial guess.
  float32 g_x = 0.0f;
  float32 g_y = 0.0f;

  // For every level in the pyramid, update the coordinates of the best match.
  for (int32 l = NUM_LEVELS - 1; l >= 0; --l) {
    // Shrink factor from original.
    const int32 shrink_factor = (1 << l);

    // Images I (prev) and J (next).
    const Image<uint8>& img_I = *frame1_->pyramid_[l];
    const Image<uint8>& img_J = *frame2_->pyramid_[l];

    // Computed gradients.
    const Image<int32>& I_x = *frame1_->spatial_x_[l];
    const Image<int32>& I_y = *frame1_->spatial_y_[l];

    // Image position vector (p := u^l), scaled for this level.
    const float32 p_x = u_x / static_cast<float32>(shrink_factor);
    const float32 p_y = u_y / static_cast<float32>(shrink_factor);

    // LOGV("Level %d: (%d, %d) / %d -> (%d, %d)",
    //      l, u_x, u_y, shrink_factor, p_x, p_y);

    // Get values for frame 1. They remain constant through the inner
    // iteration loop.
    float32 vals_I[ARRAY_SIZE];
    float32 vals_I_x[ARRAY_SIZE];
    float32 vals_I_y[ARRAY_SIZE];

    int32 val_idx = 0;
    for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
      for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
        const float32 x_pos = p_x + win_x;
        const float32 y_pos = p_y + win_y;

        if (!img_I.validInterpPixel(x_pos, y_pos)) {
          return false;
        }

        vals_I[val_idx] = img_I.getPixelInterp(x_pos, y_pos);
        vals_I_x[val_idx] = I_x.getPixelInterp(x_pos, y_pos);
        vals_I_y[val_idx] = I_y.getPixelInterp(x_pos, y_pos);

        ++val_idx;
      }
    }

    // Compute the spatial gradient matrix about point p.
    float32 G[] = { 0, 0, 0, 0 };
    calculateG(vals_I_x, vals_I_y, ARRAY_SIZE, G);

    // Find the inverse of G.
    float32 G_inv[4];
    if (!invert2x2(G, G_inv)) {
      // If we can't invert, hope that the next level will have better luck.
      continue;
    }
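
    // Note: invert2x2() (from utils.h) is assumed to report failure when
    // det(G) is near zero, i.e. when the window lacks gradient structure in
    // two independent directions (the aperture problem), in which case this
    // level contributes nothing to the displacement estimate.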

#ifdef NORMALIZE
    const float32 mean_I = computeMean(vals_I, ARRAY_SIZE);
    const float32 std_dev_I = computeStdDev(vals_I, ARRAY_SIZE, mean_I);
#endif

    // Iterate NUM_ITERATIONS times or until we converge.
    for (int32 iteration = 0; iteration < NUM_ITERATIONS; ++iteration) {
      // Get values for frame 2.
      float32 vals_J[ARRAY_SIZE];

      int32 val_idx = 0;
      for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
        for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
          const float32 x_pos = p_x + win_x + g_x;
          const float32 y_pos = p_y + win_y + g_y;

          if (!img_I.validInterpPixel(x_pos, y_pos)) {
            return false;
          }

          vals_J[val_idx] = img_J.getPixelInterp(x_pos, y_pos);

          ++val_idx;
        }
      }

#ifdef NORMALIZE
      const float32 mean_J = computeMean(vals_J, ARRAY_SIZE);
      const float32 std_dev_J = computeStdDev(vals_J, ARRAY_SIZE, mean_J);

      const float32 std_dev_ratio = std_dev_I / std_dev_J;
#endif
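
      // With NORMALIZE defined, the residual below is computed on
      // mean-subtracted, contrast-matched windows, which makes the match
      // tolerant of brightness and gain changes between the two frames.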

      // Compute image mismatch vector.
      float32 b_x = 0.0f;
      float32 b_y = 0.0f;

      val_idx = 0;
      for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
        for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
          // Normalized image difference.
#ifdef NORMALIZE
          const float32 dI = (vals_I[val_idx] - mean_I) -
                             (vals_J[val_idx] - mean_J) * std_dev_ratio;
#else
          const float32 dI = vals_I[val_idx] - vals_J[val_idx];
#endif

          b_x += dI * vals_I_x[val_idx];
          b_y += dI * vals_I_y[val_idx];

          ++val_idx;
        }
      }

      // Optical flow... solve n = G^-1 * b.
      const float32 n_x = (G_inv[0] * b_x) + (G_inv[1] * b_y);
      const float32 n_y = (G_inv[2] * b_x) + (G_inv[3] * b_y);

      // Update best guess with residual displacement from this level and
      // iteration.
      g_x += n_x;
      g_y += n_y;

      // LOGV("Iteration %d: delta (%.3f, %.3f)", iteration, n_x, n_y);

      // Abort early if we're already below the threshold.
      if (square(n_x) + square(n_y) < threshold_squared) {
        break;
      }
    }  // Iteration.

    if (l > 0) {
      // Every lower level of the pyramid is 2x as large dimensionally.
      g_x = 2.0f * g_x;
      g_y = 2.0f * g_y;
    }
  }  // Level.

  // LOGV("Final displacement for feature %d was (%.2f, %.2f)",
  //      iFeat, g_x, g_y);

  *final_x = u_x + g_x;
  *final_y = u_y + g_y;

  // Assign the best guess, if we're still in the image.
  if (frame1_->pyramid_[0]->validInterpPixel(*final_x, *final_y)) {
    return true;
  } else {
    return false;
  }
}

void OpticalFlow::addInterestRegion(const int32 num_x, const int32 num_y,
                                    float32 left, float32 top,
                                    float32 right, float32 bottom) {
  left = max(left / downsample_factor_, 0);
  top = max(top / downsample_factor_, 0);
  right = min(right / downsample_factor_, working_size_.width - 1);
  bottom = min(bottom / downsample_factor_, working_size_.height - 1);

  if (left > right || top > bottom) {
    return;
  }

  // This is inclusive of the border pixels, hence the +1.
  const int32 width = right - left + 1;

  // Also inclusive, so the loop uses <=.
  for (int32 y = top; y <= bottom; ++y) {
    bool* row_start = interest_map_->getPixelPtr(left, y);
    memset(row_start, true, width * sizeof(*row_start));
  }
}
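
// The interest map painted above is consumed by sortAndSelect() in
// findFeatures(); marked regions appear to be favored when the candidate
// feature list is pared down to MAX_FEATURES.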

Point2D OpticalFlow::getAccumulatedDelta(const Point2D& position,
                                         const float32 radius,
                                         const clock_t timestamp) const {
  Point2D curr_pos(position);

  // Scale down to downsampled size.
  curr_pos.x /= downsample_factor_;
  curr_pos.y /= downsample_factor_;

  LOGV("Tracking accumulated delta from %.2f, %.2f", curr_pos.x, curr_pos.y);

  const float32 cutoff_dist = radius / downsample_factor_;

  // Anything that ended before the requested timestamp is of no concern to us.
  bool found_it = false;
  int32 num_frames_back = -1;
  for (int32 i = 0; i < num_frames_; ++i) {
    const FramePair& frame_pair =
        frame_pairs_[geNthIndexFromEnd(i)];

    if (frame_pair.end_time <= timestamp) {
      num_frames_back = i - 1;

      if (num_frames_back > 0) {
        LOGV("Went %d out of %d frames before finding frame. (index: %d)",
             num_frames_back, num_frames_, geNthIndexFromEnd(i));
      }

      found_it = true;
      break;
    }
  }

  if (!found_it) {
    const FramePair& frame_pair = frame_pairs_[geNthIndexFromStart(0)];
    const FramePair& latest_frame_pair = frame_pairs_[geNthIndexFromEnd(0)];

    clock_t latest_time = latest_frame_pair.end_time;

    LOGW("History did not go back far enough! %ld vs %ld",
         latest_time - frame_pair.end_time,
         latest_time - timestamp);
  }

  // Loop over all the frames in the queue, tracking the accumulated delta
  // of the point from frame to frame. It's possible the point could
  // go out of frame, but keep tracking as best we can, using points near
  // the edge of the screen where it went out of bounds.
  for (int32 i = num_frames_back; i >= 0; --i) {
    const FramePair& frame_pair = frame_pairs_[geNthIndexFromEnd(i)];
    CHECK(frame_pair.end_time >= timestamp, "Frame timestamp was too early!");

    const Point2D delta = frame_pair.queryFlow(curr_pos, cutoff_dist);
    curr_pos.x += delta.x;
    curr_pos.y += delta.y;
  }

  // Scale back to original size.
  curr_pos.x *= downsample_factor_;
  curr_pos.y *= downsample_factor_;

  // Return the delta only.
  return curr_pos - position;
}
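
// Hypothetical caller sketch (names invented for illustration): to keep a
// tapped point anchored to the scene, re-query the motion accumulated since
// the tap each frame and offset the original position by it:
//
//   const Point2D delta =
//       optical_flow->getAccumulatedDelta(tap_position, tap_radius, tap_time);
//   draw_position.x = tap_position.x + delta.x;
//   draw_position.y = tap_position.y + delta.y;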

void FramePair::init(const clock_t end_time) {
  this->end_time = end_time;
  memset(optical_flow_found_feature_, false,
         sizeof(*optical_flow_found_feature_) * MAX_FEATURES);
  number_of_features_ = 0;
}

Point2D FramePair::queryFlow(
    const Point2D& initial, const float32 cutoff_dist) const {
  float32 weights[MAX_FEATURES];
  memset(weights, 0, sizeof(float32) * MAX_FEATURES);

  // Compute the max score.
  float32 max_score = 0.0f;
  for (int32 i = 0; i < MAX_FEATURES; ++i) {
    if (optical_flow_found_feature_[i]) {
      max_score = max(max_score, frame1_features_[i].score);
    }
  }

  const float32 cutoff_dist_squared = cutoff_dist * cutoff_dist;
  for (int32 i = 0; i < MAX_FEATURES; ++i) {
    if (optical_flow_found_feature_[i]) {
      const float32 sq_x_dist = square(initial.x - frame1_features_[i].x);
      const float32 sq_y_dist = square(initial.y - frame1_features_[i].y);

      const float32 dist_squared = sq_x_dist + sq_y_dist;

      // The weighting based on distance. Anything within the cutoff
      // distance has a weight of 1, and everything outside of that is within
      // the range [0, 1).
      const float32 distance_score =
          min(cutoff_dist_squared / dist_squared, 1.0f);

      // The weighting based on score strength: (0, 0.5], or 1.0 if no
      // positive scores are available.
      float32 intrinsic_score = 1.0f;
      if (max_score > 0) {
        intrinsic_score = (frame1_features_[i].score / max_score) / 2.0f;
      }

      // The final weight is the product of the two terms, in the range [0, 1].
      weights[i] = distance_score * intrinsic_score;
    }
  }

  return getWeightedDelta(weights);
}
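
// getWeightedDelta() below is a two-pass robust estimator: it first takes
// the weighted mean of the per-feature displacements, then discards
// features whose squared deviation from that mean exceeds NUM_DEVIATIONS
// times the weighted variance, and finally re-averages the survivors.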

Point2D FramePair::getWeightedDelta(const float32* const weights) const {
  float32 total_weight = 0.0f;
  float32 weighted_sum_x = 0.0f;
  float32 weighted_sum_y = 0.0f;

  Point2D deltas[MAX_FEATURES];

  // Compute weighted mean and deltas.
  for (int32 i = 0; i < MAX_FEATURES; ++i) {
    const float32 weight = weights[i];
    if (weight > 0.0f) {
      deltas[i] = frame2_features_[i] - frame1_features_[i];
      weighted_sum_x += deltas[i].x * weight;
      weighted_sum_y += deltas[i].y * weight;
      total_weight += weight;
    }
  }

  const float32 weighted_mean_x = weighted_sum_x / total_weight;
  const float32 weighted_mean_y = weighted_sum_y / total_weight;

  // Compute weighted squared standard deviation from weighted mean.
  float32 weighted_dev_squared_sum = 0.0f;
  for (int32 i = 0; i < MAX_FEATURES; ++i) {
    const float32 weight = weights[i];

    if (weight > 0.0f) {
      const float32 dev_x = deltas[i].x - weighted_mean_x;
      const float32 dev_y = deltas[i].y - weighted_mean_y;

      const float32 squared_deviation = (dev_x * dev_x) + (dev_y * dev_y);

      weighted_dev_squared_sum += squared_deviation * weight;
    }
  }

  const float32 weighted_std_dev_squared =
      weighted_dev_squared_sum / total_weight;

  // Recompute weighted mean change without outliers.
  float32 good_weight = 0.0f;
  float32 good_sum_x = 0.0f;
  float32 good_sum_y = 0.0f;

  for (int32 i = 0; i < MAX_FEATURES; ++i) {
    const float32 weight = weights[i];

    if (weight > 0.0f) {
      const float32 dev_x = deltas[i].x - weighted_mean_x;
      const float32 dev_y = deltas[i].y - weighted_mean_y;

      const float32 sqrd_deviation = (dev_x * dev_x) + (dev_y * dev_y);

      // Throw out anything whose squared deviation exceeds NUM_DEVIATIONS
      // times the weighted variance.
      if (sqrd_deviation <= NUM_DEVIATIONS * weighted_std_dev_squared) {
        good_sum_x += deltas[i].x * weight;
        good_sum_y += deltas[i].y * weight;
        good_weight += weight;
      }
    }
  }

  if (good_weight > 0.0f) {
    return Point2D(good_sum_x / good_weight, good_sum_y / good_weight);
  } else {
    return Point2D(0.0f, 0.0f);
  }
}

}  // namespace flow