/media/libvpx/vp8/encoder/rdopt.c

http://github.com/zpao/v8monkey · C · 2440 lines · 1814 code · 453 blank · 173 comment · 283 complexity · b514ae315fad58f484ed2f377e680084 MD5 · raw file

Large files are truncated click here to view the full file

  1. /*
  2. * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
  3. *
  4. * Use of this source code is governed by a BSD-style license
  5. * that can be found in the LICENSE file in the root of the source
  6. * tree. An additional intellectual property rights grant can be found
  7. * in the file PATENTS. All contributing project authors may
  8. * be found in the AUTHORS file in the root of the source tree.
  9. */
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
  14. #include "vp8/common/pragmas.h"
  15. #include "tokenize.h"
  16. #include "treewriter.h"
  17. #include "onyx_int.h"
  18. #include "modecosts.h"
  19. #include "encodeintra.h"
  20. #include "vp8/common/entropymode.h"
  21. #include "vp8/common/reconinter.h"
  22. #include "vp8/common/reconintra.h"
  23. #include "vp8/common/reconintra4x4.h"
  24. #include "vp8/common/findnearmv.h"
  25. #include "encodemb.h"
  26. #include "quantize.h"
  27. #include "vp8/common/idct.h"
  28. #include "vp8/common/g_common.h"
  29. #include "variance.h"
  30. #include "mcomp.h"
  31. #include "rdopt.h"
  32. #include "vpx_mem/vpx_mem.h"
  33. #include "dct.h"
  34. #include "vp8/common/systemdependent.h"
  35. #if CONFIG_RUNTIME_CPU_DETECT
  36. #define IF_RTCD(x) (x)
  37. #else
  38. #define IF_RTCD(x) NULL
  39. #endif
  40. extern void vp8cx_mb_init_quantizer(VP8_COMP *cpi, MACROBLOCK *x);
  41. extern void vp8_update_zbin_extra(VP8_COMP *cpi, MACROBLOCK *x);
  42. #define MAXF(a,b) (((a) > (b)) ? (a) : (b))
  43. static const int auto_speed_thresh[17] =
  44. {
  45. 1000,
  46. 200,
  47. 150,
  48. 130,
  49. 150,
  50. 125,
  51. 120,
  52. 115,
  53. 115,
  54. 115,
  55. 115,
  56. 115,
  57. 115,
  58. 115,
  59. 115,
  60. 115,
  61. 105
  62. };
  63. const MB_PREDICTION_MODE vp8_mode_order[MAX_MODES] =
  64. {
  65. ZEROMV,
  66. DC_PRED,
  67. NEARESTMV,
  68. NEARMV,
  69. ZEROMV,
  70. NEARESTMV,
  71. ZEROMV,
  72. NEARESTMV,
  73. NEARMV,
  74. NEARMV,
  75. V_PRED,
  76. H_PRED,
  77. TM_PRED,
  78. NEWMV,
  79. NEWMV,
  80. NEWMV,
  81. SPLITMV,
  82. SPLITMV,
  83. SPLITMV,
  84. B_PRED,
  85. };
  86. const MV_REFERENCE_FRAME vp8_ref_frame_order[MAX_MODES] =
  87. {
  88. LAST_FRAME,
  89. INTRA_FRAME,
  90. LAST_FRAME,
  91. LAST_FRAME,
  92. GOLDEN_FRAME,
  93. GOLDEN_FRAME,
  94. ALTREF_FRAME,
  95. ALTREF_FRAME,
  96. GOLDEN_FRAME,
  97. ALTREF_FRAME,
  98. INTRA_FRAME,
  99. INTRA_FRAME,
  100. INTRA_FRAME,
  101. LAST_FRAME,
  102. GOLDEN_FRAME,
  103. ALTREF_FRAME,
  104. LAST_FRAME,
  105. GOLDEN_FRAME,
  106. ALTREF_FRAME,
  107. INTRA_FRAME,
  108. };
  109. static void fill_token_costs(
  110. unsigned int c [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [MAX_ENTROPY_TOKENS],
  111. const vp8_prob p [BLOCK_TYPES] [COEF_BANDS] [PREV_COEF_CONTEXTS] [ENTROPY_NODES]
  112. )
  113. {
  114. int i, j, k;
  115. for (i = 0; i < BLOCK_TYPES; i++)
  116. for (j = 0; j < COEF_BANDS; j++)
  117. for (k = 0; k < PREV_COEF_CONTEXTS; k++)
  118. vp8_cost_tokens((int *)(c [i][j][k]), p [i][j][k], vp8_coef_tree);
  119. }
  120. static int rd_iifactor [ 32 ] = { 4, 4, 3, 2, 1, 0, 0, 0,
  121. 0, 0, 0, 0, 0, 0, 0, 0,
  122. 0, 0, 0, 0, 0, 0, 0, 0,
  123. 0, 0, 0, 0, 0, 0, 0, 0,
  124. };
  125. /* values are now correlated to quantizer */
  126. static int sad_per_bit16lut[QINDEX_RANGE] =
  127. {
  128. 2, 2, 2, 2, 2, 2, 2, 2,
  129. 2, 2, 2, 2, 2, 2, 2, 2,
  130. 3, 3, 3, 3, 3, 3, 3, 3,
  131. 3, 3, 3, 3, 3, 3, 4, 4,
  132. 4, 4, 4, 4, 4, 4, 4, 4,
  133. 4, 4, 5, 5, 5, 5, 5, 5,
  134. 5, 5, 5, 5, 5, 5, 6, 6,
  135. 6, 6, 6, 6, 6, 6, 6, 6,
  136. 6, 6, 7, 7, 7, 7, 7, 7,
  137. 7, 7, 7, 7, 7, 7, 8, 8,
  138. 8, 8, 8, 8, 8, 8, 8, 8,
  139. 8, 8, 9, 9, 9, 9, 9, 9,
  140. 9, 9, 9, 9, 9, 9, 10, 10,
  141. 10, 10, 10, 10, 10, 10, 11, 11,
  142. 11, 11, 11, 11, 12, 12, 12, 12,
  143. 12, 12, 13, 13, 13, 13, 14, 14
  144. };
  145. static int sad_per_bit4lut[QINDEX_RANGE] =
  146. {
  147. 2, 2, 2, 2, 2, 2, 3, 3,
  148. 3, 3, 3, 3, 3, 3, 3, 3,
  149. 3, 3, 3, 3, 4, 4, 4, 4,
  150. 4, 4, 4, 4, 4, 4, 5, 5,
  151. 5, 5, 5, 5, 6, 6, 6, 6,
  152. 6, 6, 6, 6, 6, 6, 6, 6,
  153. 7, 7, 7, 7, 7, 7, 7, 7,
  154. 7, 7, 7, 7, 7, 8, 8, 8,
  155. 8, 8, 9, 9, 9, 9, 9, 9,
  156. 10, 10, 10, 10, 10, 10, 10, 10,
  157. 11, 11, 11, 11, 11, 11, 11, 11,
  158. 12, 12, 12, 12, 12, 12, 12, 12,
  159. 13, 13, 13, 13, 13, 13, 13, 14,
  160. 14, 14, 14, 14, 15, 15, 15, 15,
  161. 16, 16, 16, 16, 17, 17, 17, 18,
  162. 18, 18, 19, 19, 19, 20, 20, 20,
  163. };
  164. void vp8cx_initialize_me_consts(VP8_COMP *cpi, int QIndex)
  165. {
  166. cpi->mb.sadperbit16 = sad_per_bit16lut[QIndex];
  167. cpi->mb.sadperbit4 = sad_per_bit4lut[QIndex];
  168. }
  169. void vp8_initialize_rd_consts(VP8_COMP *cpi, int Qvalue)
  170. {
  171. int q;
  172. int i;
  173. double capped_q = (Qvalue < 160) ? (double)Qvalue : 160.0;
  174. double rdconst = 2.70;
  175. vp8_clear_system_state(); //__asm emms;
  176. // Further tests required to see if optimum is different
  177. // for key frames, golden frames and arf frames.
  178. // if (cpi->common.refresh_golden_frame ||
  179. // cpi->common.refresh_alt_ref_frame)
  180. cpi->RDMULT = (int)(rdconst * (capped_q * capped_q));
  181. // Extend rate multiplier along side quantizer zbin increases
  182. if (cpi->zbin_over_quant > 0)
  183. {
  184. double oq_factor;
  185. double modq;
  186. // Experimental code using the same basic equation as used for Q above
  187. // The units of cpi->zbin_over_quant are 1/128 of Q bin size
  188. oq_factor = 1.0 + ((double)0.0015625 * cpi->zbin_over_quant);
  189. modq = (int)((double)capped_q * oq_factor);
  190. cpi->RDMULT = (int)(rdconst * (modq * modq));
  191. }
  192. if (cpi->pass == 2 && (cpi->common.frame_type != KEY_FRAME))
  193. {
  194. if (cpi->twopass.next_iiratio > 31)
  195. cpi->RDMULT += (cpi->RDMULT * rd_iifactor[31]) >> 4;
  196. else
  197. cpi->RDMULT +=
  198. (cpi->RDMULT * rd_iifactor[cpi->twopass.next_iiratio]) >> 4;
  199. }
  200. cpi->mb.errorperbit = (cpi->RDMULT / 110);
  201. cpi->mb.errorperbit += (cpi->mb.errorperbit==0);
  202. vp8_set_speed_features(cpi);
  203. q = (int)pow(Qvalue, 1.25);
  204. if (q < 8)
  205. q = 8;
  206. if (cpi->RDMULT > 1000)
  207. {
  208. cpi->RDDIV = 1;
  209. cpi->RDMULT /= 100;
  210. for (i = 0; i < MAX_MODES; i++)
  211. {
  212. if (cpi->sf.thresh_mult[i] < INT_MAX)
  213. {
  214. cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q / 100;
  215. }
  216. else
  217. {
  218. cpi->rd_threshes[i] = INT_MAX;
  219. }
  220. cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
  221. }
  222. }
  223. else
  224. {
  225. cpi->RDDIV = 100;
  226. for (i = 0; i < MAX_MODES; i++)
  227. {
  228. if (cpi->sf.thresh_mult[i] < (INT_MAX / q))
  229. {
  230. cpi->rd_threshes[i] = cpi->sf.thresh_mult[i] * q;
  231. }
  232. else
  233. {
  234. cpi->rd_threshes[i] = INT_MAX;
  235. }
  236. cpi->rd_baseline_thresh[i] = cpi->rd_threshes[i];
  237. }
  238. }
  239. fill_token_costs(
  240. cpi->mb.token_costs,
  241. (const vp8_prob( *)[8][3][11]) cpi->common.fc.coef_probs
  242. );
  243. vp8_init_mode_costs(cpi);
  244. }
  245. void vp8_auto_select_speed(VP8_COMP *cpi)
  246. {
  247. int milliseconds_for_compress = (int)(1000000 / cpi->oxcf.frame_rate);
  248. milliseconds_for_compress = milliseconds_for_compress * (16 - cpi->oxcf.cpu_used) / 16;
  249. #if 0
  250. if (0)
  251. {
  252. FILE *f;
  253. f = fopen("speed.stt", "a");
  254. fprintf(f, " %8ld %10ld %10ld %10ld\n",
  255. cpi->common.current_video_frame, cpi->Speed, milliseconds_for_compress, cpi->avg_pick_mode_time);
  256. fclose(f);
  257. }
  258. #endif
  259. /*
  260. // this is done during parameter valid check
  261. if( cpi->oxcf.cpu_used > 16)
  262. cpi->oxcf.cpu_used = 16;
  263. if( cpi->oxcf.cpu_used < -16)
  264. cpi->oxcf.cpu_used = -16;
  265. */
  266. if (cpi->avg_pick_mode_time < milliseconds_for_compress && (cpi->avg_encode_time - cpi->avg_pick_mode_time) < milliseconds_for_compress)
  267. {
  268. if (cpi->avg_pick_mode_time == 0)
  269. {
  270. cpi->Speed = 4;
  271. }
  272. else
  273. {
  274. if (milliseconds_for_compress * 100 < cpi->avg_encode_time * 95)
  275. {
  276. cpi->Speed += 2;
  277. cpi->avg_pick_mode_time = 0;
  278. cpi->avg_encode_time = 0;
  279. if (cpi->Speed > 16)
  280. {
  281. cpi->Speed = 16;
  282. }
  283. }
  284. if (milliseconds_for_compress * 100 > cpi->avg_encode_time * auto_speed_thresh[cpi->Speed])
  285. {
  286. cpi->Speed -= 1;
  287. cpi->avg_pick_mode_time = 0;
  288. cpi->avg_encode_time = 0;
  289. // In real-time mode, cpi->speed is in [4, 16].
  290. if (cpi->Speed < 4) //if ( cpi->Speed < 0 )
  291. {
  292. cpi->Speed = 4; //cpi->Speed = 0;
  293. }
  294. }
  295. }
  296. }
  297. else
  298. {
  299. cpi->Speed += 4;
  300. if (cpi->Speed > 16)
  301. cpi->Speed = 16;
  302. cpi->avg_pick_mode_time = 0;
  303. cpi->avg_encode_time = 0;
  304. }
  305. }
  306. int vp8_block_error_c(short *coeff, short *dqcoeff)
  307. {
  308. int i;
  309. int error = 0;
  310. for (i = 0; i < 16; i++)
  311. {
  312. int this_diff = coeff[i] - dqcoeff[i];
  313. error += this_diff * this_diff;
  314. }
  315. return error;
  316. }
  317. int vp8_mbblock_error_c(MACROBLOCK *mb, int dc)
  318. {
  319. BLOCK *be;
  320. BLOCKD *bd;
  321. int i, j;
  322. int berror, error = 0;
  323. for (i = 0; i < 16; i++)
  324. {
  325. be = &mb->block[i];
  326. bd = &mb->e_mbd.block[i];
  327. berror = 0;
  328. for (j = dc; j < 16; j++)
  329. {
  330. int this_diff = be->coeff[j] - bd->dqcoeff[j];
  331. berror += this_diff * this_diff;
  332. }
  333. error += berror;
  334. }
  335. return error;
  336. }
  337. int vp8_mbuverror_c(MACROBLOCK *mb)
  338. {
  339. BLOCK *be;
  340. BLOCKD *bd;
  341. int i;
  342. int error = 0;
  343. for (i = 16; i < 24; i++)
  344. {
  345. be = &mb->block[i];
  346. bd = &mb->e_mbd.block[i];
  347. error += vp8_block_error_c(be->coeff, bd->dqcoeff);
  348. }
  349. return error;
  350. }
  351. int VP8_UVSSE(MACROBLOCK *x, const vp8_variance_rtcd_vtable_t *rtcd)
  352. {
  353. unsigned char *uptr, *vptr;
  354. unsigned char *upred_ptr = (*(x->block[16].base_src) + x->block[16].src);
  355. unsigned char *vpred_ptr = (*(x->block[20].base_src) + x->block[20].src);
  356. int uv_stride = x->block[16].src_stride;
  357. unsigned int sse1 = 0;
  358. unsigned int sse2 = 0;
  359. int mv_row;
  360. int mv_col;
  361. int offset;
  362. int pre_stride = x->e_mbd.block[16].pre_stride;
  363. vp8_build_uvmvs(&x->e_mbd, 0);
  364. mv_row = x->e_mbd.block[16].bmi.mv.as_mv.row;
  365. mv_col = x->e_mbd.block[16].bmi.mv.as_mv.col;
  366. offset = (mv_row >> 3) * pre_stride + (mv_col >> 3);
  367. uptr = x->e_mbd.pre.u_buffer + offset;
  368. vptr = x->e_mbd.pre.v_buffer + offset;
  369. if ((mv_row | mv_col) & 7)
  370. {
  371. VARIANCE_INVOKE(rtcd, subpixvar8x8)(uptr, pre_stride,
  372. mv_col & 7, mv_row & 7, upred_ptr, uv_stride, &sse2);
  373. VARIANCE_INVOKE(rtcd, subpixvar8x8)(vptr, pre_stride,
  374. mv_col & 7, mv_row & 7, vpred_ptr, uv_stride, &sse1);
  375. sse2 += sse1;
  376. }
  377. else
  378. {
  379. VARIANCE_INVOKE(rtcd, var8x8)(uptr, pre_stride,
  380. upred_ptr, uv_stride, &sse2);
  381. VARIANCE_INVOKE(rtcd, var8x8)(vptr, pre_stride,
  382. vpred_ptr, uv_stride, &sse1);
  383. sse2 += sse1;
  384. }
  385. return sse2;
  386. }
  387. static int cost_coeffs(MACROBLOCK *mb, BLOCKD *b, int type, ENTROPY_CONTEXT *a, ENTROPY_CONTEXT *l)
  388. {
  389. int c = !type; /* start at coef 0, unless Y with Y2 */
  390. int eob = b->eob;
  391. int pt ; /* surrounding block/prev coef predictor */
  392. int cost = 0;
  393. short *qcoeff_ptr = b->qcoeff;
  394. VP8_COMBINEENTROPYCONTEXTS(pt, *a, *l);
  395. # define QC( I) ( qcoeff_ptr [vp8_default_zig_zag1d[I]] )
  396. for (; c < eob; c++)
  397. {
  398. int v = QC(c);
  399. int t = vp8_dct_value_tokens_ptr[v].Token;
  400. cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [t];
  401. cost += vp8_dct_value_cost_ptr[v];
  402. pt = vp8_prev_token_class[t];
  403. }
  404. # undef QC
  405. if (c < 16)
  406. cost += mb->token_costs [type] [vp8_coef_bands[c]] [pt] [DCT_EOB_TOKEN];
  407. pt = (c != !type); // is eob first coefficient;
  408. *a = *l = pt;
  409. return cost;
  410. }
  411. static int vp8_rdcost_mby(MACROBLOCK *mb)
  412. {
  413. int cost = 0;
  414. int b;
  415. MACROBLOCKD *x = &mb->e_mbd;
  416. ENTROPY_CONTEXT_PLANES t_above, t_left;
  417. ENTROPY_CONTEXT *ta;
  418. ENTROPY_CONTEXT *tl;
  419. vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  420. vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  421. ta = (ENTROPY_CONTEXT *)&t_above;
  422. tl = (ENTROPY_CONTEXT *)&t_left;
  423. for (b = 0; b < 16; b++)
  424. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_NO_DC,
  425. ta + vp8_block2above[b], tl + vp8_block2left[b]);
  426. cost += cost_coeffs(mb, x->block + 24, PLANE_TYPE_Y2,
  427. ta + vp8_block2above[24], tl + vp8_block2left[24]);
  428. return cost;
  429. }
  430. static void macro_block_yrd( MACROBLOCK *mb,
  431. int *Rate,
  432. int *Distortion,
  433. const vp8_encodemb_rtcd_vtable_t *rtcd)
  434. {
  435. int b;
  436. MACROBLOCKD *const x = &mb->e_mbd;
  437. BLOCK *const mb_y2 = mb->block + 24;
  438. BLOCKD *const x_y2 = x->block + 24;
  439. short *Y2DCPtr = mb_y2->src_diff;
  440. BLOCK *beptr;
  441. int d;
  442. ENCODEMB_INVOKE(rtcd, submby)( mb->src_diff, *(mb->block[0].base_src),
  443. mb->e_mbd.predictor, mb->block[0].src_stride );
  444. // Fdct and building the 2nd order block
  445. for (beptr = mb->block; beptr < mb->block + 16; beptr += 2)
  446. {
  447. mb->vp8_short_fdct8x4(beptr->src_diff, beptr->coeff, 32);
  448. *Y2DCPtr++ = beptr->coeff[0];
  449. *Y2DCPtr++ = beptr->coeff[16];
  450. }
  451. // 2nd order fdct
  452. mb->short_walsh4x4(mb_y2->src_diff, mb_y2->coeff, 8);
  453. // Quantization
  454. for (b = 0; b < 16; b++)
  455. {
  456. mb->quantize_b(&mb->block[b], &mb->e_mbd.block[b]);
  457. }
  458. // DC predication and Quantization of 2nd Order block
  459. mb->quantize_b(mb_y2, x_y2);
  460. // Distortion
  461. d = ENCODEMB_INVOKE(rtcd, mberr)(mb, 1) << 2;
  462. d += ENCODEMB_INVOKE(rtcd, berr)(mb_y2->coeff, x_y2->dqcoeff);
  463. *Distortion = (d >> 4);
  464. // rate
  465. *Rate = vp8_rdcost_mby(mb);
  466. }
  467. static void copy_predictor(unsigned char *dst, const unsigned char *predictor)
  468. {
  469. const unsigned int *p = (const unsigned int *)predictor;
  470. unsigned int *d = (unsigned int *)dst;
  471. d[0] = p[0];
  472. d[4] = p[4];
  473. d[8] = p[8];
  474. d[12] = p[12];
  475. }
  476. static int rd_pick_intra4x4block(
  477. VP8_COMP *cpi,
  478. MACROBLOCK *x,
  479. BLOCK *be,
  480. BLOCKD *b,
  481. B_PREDICTION_MODE *best_mode,
  482. unsigned int *bmode_costs,
  483. ENTROPY_CONTEXT *a,
  484. ENTROPY_CONTEXT *l,
  485. int *bestrate,
  486. int *bestratey,
  487. int *bestdistortion)
  488. {
  489. B_PREDICTION_MODE mode;
  490. int best_rd = INT_MAX;
  491. int rate = 0;
  492. int distortion;
  493. ENTROPY_CONTEXT ta = *a, tempa = *a;
  494. ENTROPY_CONTEXT tl = *l, templ = *l;
  495. /*
  496. * The predictor buffer is a 2d buffer with a stride of 16. Create
  497. * a temp buffer that meets the stride requirements, but we are only
  498. * interested in the left 4x4 block
  499. * */
  500. DECLARE_ALIGNED_ARRAY(16, unsigned char, best_predictor, 16*4);
  501. DECLARE_ALIGNED_ARRAY(16, short, best_dqcoeff, 16);
  502. for (mode = B_DC_PRED; mode <= B_HU_PRED; mode++)
  503. {
  504. int this_rd;
  505. int ratey;
  506. rate = bmode_costs[mode];
  507. RECON_INVOKE(&cpi->rtcd.common->recon, intra4x4_predict)
  508. (b, mode, b->predictor);
  509. ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), subb)(be, b, 16);
  510. x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
  511. x->quantize_b(be, b);
  512. tempa = ta;
  513. templ = tl;
  514. ratey = cost_coeffs(x, b, PLANE_TYPE_Y_WITH_DC, &tempa, &templ);
  515. rate += ratey;
  516. distortion = ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), berr)(be->coeff, b->dqcoeff) >> 2;
  517. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  518. if (this_rd < best_rd)
  519. {
  520. *bestrate = rate;
  521. *bestratey = ratey;
  522. *bestdistortion = distortion;
  523. best_rd = this_rd;
  524. *best_mode = mode;
  525. *a = tempa;
  526. *l = templ;
  527. copy_predictor(best_predictor, b->predictor);
  528. vpx_memcpy(best_dqcoeff, b->dqcoeff, 32);
  529. }
  530. }
  531. b->bmi.as_mode = (B_PREDICTION_MODE)(*best_mode);
  532. IDCT_INVOKE(IF_RTCD(&cpi->rtcd.common->idct), idct16)(best_dqcoeff, b->diff, 32);
  533. RECON_INVOKE(IF_RTCD(&cpi->rtcd.common->recon), recon)(best_predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
  534. return best_rd;
  535. }
  536. static int rd_pick_intra4x4mby_modes(VP8_COMP *cpi, MACROBLOCK *mb, int *Rate,
  537. int *rate_y, int *Distortion, int best_rd)
  538. {
  539. MACROBLOCKD *const xd = &mb->e_mbd;
  540. int i;
  541. int cost = mb->mbmode_cost [xd->frame_type] [B_PRED];
  542. int distortion = 0;
  543. int tot_rate_y = 0;
  544. int64_t total_rd = 0;
  545. ENTROPY_CONTEXT_PLANES t_above, t_left;
  546. ENTROPY_CONTEXT *ta;
  547. ENTROPY_CONTEXT *tl;
  548. unsigned int *bmode_costs;
  549. vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  550. vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  551. ta = (ENTROPY_CONTEXT *)&t_above;
  552. tl = (ENTROPY_CONTEXT *)&t_left;
  553. vp8_intra_prediction_down_copy(xd);
  554. bmode_costs = mb->inter_bmode_costs;
  555. for (i = 0; i < 16; i++)
  556. {
  557. MODE_INFO *const mic = xd->mode_info_context;
  558. const int mis = xd->mode_info_stride;
  559. B_PREDICTION_MODE UNINITIALIZED_IS_SAFE(best_mode);
  560. int UNINITIALIZED_IS_SAFE(r), UNINITIALIZED_IS_SAFE(ry), UNINITIALIZED_IS_SAFE(d);
  561. if (mb->e_mbd.frame_type == KEY_FRAME)
  562. {
  563. const B_PREDICTION_MODE A = above_block_mode(mic, i, mis);
  564. const B_PREDICTION_MODE L = left_block_mode(mic, i);
  565. bmode_costs = mb->bmode_costs[A][L];
  566. }
  567. total_rd += rd_pick_intra4x4block(
  568. cpi, mb, mb->block + i, xd->block + i, &best_mode, bmode_costs,
  569. ta + vp8_block2above[i],
  570. tl + vp8_block2left[i], &r, &ry, &d);
  571. cost += r;
  572. distortion += d;
  573. tot_rate_y += ry;
  574. mic->bmi[i].as_mode = best_mode;
  575. if(total_rd >= (int64_t)best_rd)
  576. break;
  577. }
  578. if(total_rd >= (int64_t)best_rd)
  579. return INT_MAX;
  580. *Rate = cost;
  581. *rate_y += tot_rate_y;
  582. *Distortion = distortion;
  583. return RDCOST(mb->rdmult, mb->rddiv, cost, distortion);
  584. }
  585. static int rd_pick_intra16x16mby_mode(VP8_COMP *cpi,
  586. MACROBLOCK *x,
  587. int *Rate,
  588. int *rate_y,
  589. int *Distortion)
  590. {
  591. MB_PREDICTION_MODE mode;
  592. MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  593. int rate, ratey;
  594. int distortion;
  595. int best_rd = INT_MAX;
  596. int this_rd;
  597. //Y Search for 16x16 intra prediction mode
  598. for (mode = DC_PRED; mode <= TM_PRED; mode++)
  599. {
  600. x->e_mbd.mode_info_context->mbmi.mode = mode;
  601. RECON_INVOKE(&cpi->common.rtcd.recon, build_intra_predictors_mby)
  602. (&x->e_mbd);
  603. macro_block_yrd(x, &ratey, &distortion, IF_RTCD(&cpi->rtcd.encodemb));
  604. rate = ratey + x->mbmode_cost[x->e_mbd.frame_type]
  605. [x->e_mbd.mode_info_context->mbmi.mode];
  606. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  607. if (this_rd < best_rd)
  608. {
  609. mode_selected = mode;
  610. best_rd = this_rd;
  611. *Rate = rate;
  612. *rate_y = ratey;
  613. *Distortion = distortion;
  614. }
  615. }
  616. x->e_mbd.mode_info_context->mbmi.mode = mode_selected;
  617. return best_rd;
  618. }
  619. static int rd_cost_mbuv(MACROBLOCK *mb)
  620. {
  621. int b;
  622. int cost = 0;
  623. MACROBLOCKD *x = &mb->e_mbd;
  624. ENTROPY_CONTEXT_PLANES t_above, t_left;
  625. ENTROPY_CONTEXT *ta;
  626. ENTROPY_CONTEXT *tl;
  627. vpx_memcpy(&t_above, mb->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  628. vpx_memcpy(&t_left, mb->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  629. ta = (ENTROPY_CONTEXT *)&t_above;
  630. tl = (ENTROPY_CONTEXT *)&t_left;
  631. for (b = 16; b < 24; b++)
  632. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_UV,
  633. ta + vp8_block2above[b], tl + vp8_block2left[b]);
  634. return cost;
  635. }
  636. static int vp8_rd_inter_uv(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *distortion, int fullpixel)
  637. {
  638. vp8_build_uvmvs(&x->e_mbd, fullpixel);
  639. vp8_encode_inter16x16uvrd(IF_RTCD(&cpi->rtcd), x);
  640. *rate = rd_cost_mbuv(x);
  641. *distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
  642. return RDCOST(x->rdmult, x->rddiv, *rate, *distortion);
  643. }
  644. static void rd_pick_intra_mbuv_mode(VP8_COMP *cpi, MACROBLOCK *x, int *rate, int *rate_tokenonly, int *distortion)
  645. {
  646. MB_PREDICTION_MODE mode;
  647. MB_PREDICTION_MODE UNINITIALIZED_IS_SAFE(mode_selected);
  648. int best_rd = INT_MAX;
  649. int UNINITIALIZED_IS_SAFE(d), UNINITIALIZED_IS_SAFE(r);
  650. int rate_to;
  651. for (mode = DC_PRED; mode <= TM_PRED; mode++)
  652. {
  653. int rate;
  654. int distortion;
  655. int this_rd;
  656. x->e_mbd.mode_info_context->mbmi.uv_mode = mode;
  657. RECON_INVOKE(&cpi->rtcd.common->recon, build_intra_predictors_mbuv)
  658. (&x->e_mbd);
  659. ENCODEMB_INVOKE(IF_RTCD(&cpi->rtcd.encodemb), submbuv)(x->src_diff,
  660. x->src.u_buffer, x->src.v_buffer, x->e_mbd.predictor,
  661. x->src.uv_stride);
  662. vp8_transform_mbuv(x);
  663. vp8_quantize_mbuv(x);
  664. rate_to = rd_cost_mbuv(x);
  665. rate = rate_to + x->intra_uv_mode_cost[x->e_mbd.frame_type][x->e_mbd.mode_info_context->mbmi.uv_mode];
  666. distortion = ENCODEMB_INVOKE(&cpi->rtcd.encodemb, mbuverr)(x) / 4;
  667. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  668. if (this_rd < best_rd)
  669. {
  670. best_rd = this_rd;
  671. d = distortion;
  672. r = rate;
  673. *rate_tokenonly = rate_to;
  674. mode_selected = mode;
  675. }
  676. }
  677. *rate = r;
  678. *distortion = d;
  679. x->e_mbd.mode_info_context->mbmi.uv_mode = mode_selected;
  680. }
  681. int vp8_cost_mv_ref(MB_PREDICTION_MODE m, const int near_mv_ref_ct[4])
  682. {
  683. vp8_prob p [VP8_MVREFS-1];
  684. assert(NEARESTMV <= m && m <= SPLITMV);
  685. vp8_mv_ref_probs(p, near_mv_ref_ct);
  686. return vp8_cost_token(vp8_mv_ref_tree, p,
  687. vp8_mv_ref_encoding_array - NEARESTMV + m);
  688. }
  689. void vp8_set_mbmode_and_mvs(MACROBLOCK *x, MB_PREDICTION_MODE mb, int_mv *mv)
  690. {
  691. x->e_mbd.mode_info_context->mbmi.mode = mb;
  692. x->e_mbd.mode_info_context->mbmi.mv.as_int = mv->as_int;
  693. }
  694. static int labels2mode(
  695. MACROBLOCK *x,
  696. int const *labelings, int which_label,
  697. B_PREDICTION_MODE this_mode,
  698. int_mv *this_mv, int_mv *best_ref_mv,
  699. int *mvcost[2]
  700. )
  701. {
  702. MACROBLOCKD *const xd = & x->e_mbd;
  703. MODE_INFO *const mic = xd->mode_info_context;
  704. const int mis = xd->mode_info_stride;
  705. int cost = 0;
  706. int thismvcost = 0;
  707. /* We have to be careful retrieving previously-encoded motion vectors.
  708. Ones from this macroblock have to be pulled from the BLOCKD array
  709. as they have not yet made it to the bmi array in our MB_MODE_INFO. */
  710. int i = 0;
  711. do
  712. {
  713. BLOCKD *const d = xd->block + i;
  714. const int row = i >> 2, col = i & 3;
  715. B_PREDICTION_MODE m;
  716. if (labelings[i] != which_label)
  717. continue;
  718. if (col && labelings[i] == labelings[i-1])
  719. m = LEFT4X4;
  720. else if (row && labelings[i] == labelings[i-4])
  721. m = ABOVE4X4;
  722. else
  723. {
  724. // the only time we should do costing for new motion vector or mode
  725. // is when we are on a new label (jbb May 08, 2007)
  726. switch (m = this_mode)
  727. {
  728. case NEW4X4 :
  729. thismvcost = vp8_mv_bit_cost(this_mv, best_ref_mv, mvcost, 102);
  730. break;
  731. case LEFT4X4:
  732. this_mv->as_int = col ? d[-1].bmi.mv.as_int : left_block_mv(mic, i);
  733. break;
  734. case ABOVE4X4:
  735. this_mv->as_int = row ? d[-4].bmi.mv.as_int : above_block_mv(mic, i, mis);
  736. break;
  737. case ZERO4X4:
  738. this_mv->as_int = 0;
  739. break;
  740. default:
  741. break;
  742. }
  743. if (m == ABOVE4X4) // replace above with left if same
  744. {
  745. int_mv left_mv;
  746. left_mv.as_int = col ? d[-1].bmi.mv.as_int :
  747. left_block_mv(mic, i);
  748. if (left_mv.as_int == this_mv->as_int)
  749. m = LEFT4X4;
  750. }
  751. cost = x->inter_bmode_costs[ m];
  752. }
  753. d->bmi.mv.as_int = this_mv->as_int;
  754. x->partition_info->bmi[i].mode = m;
  755. x->partition_info->bmi[i].mv.as_int = this_mv->as_int;
  756. }
  757. while (++i < 16);
  758. cost += thismvcost ;
  759. return cost;
  760. }
  761. static int rdcost_mbsegment_y(MACROBLOCK *mb, const int *labels,
  762. int which_label, ENTROPY_CONTEXT *ta,
  763. ENTROPY_CONTEXT *tl)
  764. {
  765. int cost = 0;
  766. int b;
  767. MACROBLOCKD *x = &mb->e_mbd;
  768. for (b = 0; b < 16; b++)
  769. if (labels[ b] == which_label)
  770. cost += cost_coeffs(mb, x->block + b, PLANE_TYPE_Y_WITH_DC,
  771. ta + vp8_block2above[b],
  772. tl + vp8_block2left[b]);
  773. return cost;
  774. }
  775. static unsigned int vp8_encode_inter_mb_segment(MACROBLOCK *x, int const *labels, int which_label, const vp8_encodemb_rtcd_vtable_t *rtcd)
  776. {
  777. int i;
  778. unsigned int distortion = 0;
  779. for (i = 0; i < 16; i++)
  780. {
  781. if (labels[i] == which_label)
  782. {
  783. BLOCKD *bd = &x->e_mbd.block[i];
  784. BLOCK *be = &x->block[i];
  785. vp8_build_inter_predictors_b(bd, 16, x->e_mbd.subpixel_predict);
  786. ENCODEMB_INVOKE(rtcd, subb)(be, bd, 16);
  787. x->vp8_short_fdct4x4(be->src_diff, be->coeff, 32);
  788. // set to 0 no way to account for 2nd order DC so discount
  789. //be->coeff[0] = 0;
  790. x->quantize_b(be, bd);
  791. distortion += ENCODEMB_INVOKE(rtcd, berr)(be->coeff, bd->dqcoeff);
  792. }
  793. }
  794. return distortion;
  795. }
  796. static const unsigned int segmentation_to_sseshift[4] = {3, 3, 2, 0};
  797. typedef struct
  798. {
  799. int_mv *ref_mv;
  800. int_mv mvp;
  801. int segment_rd;
  802. int segment_num;
  803. int r;
  804. int d;
  805. int segment_yrate;
  806. B_PREDICTION_MODE modes[16];
  807. int_mv mvs[16];
  808. unsigned char eobs[16];
  809. int mvthresh;
  810. int *mdcounts;
  811. int_mv sv_mvp[4]; // save 4 mvp from 8x8
  812. int sv_istep[2]; // save 2 initial step_param for 16x8/8x16
  813. } BEST_SEG_INFO;
  814. static void rd_check_segment(VP8_COMP *cpi, MACROBLOCK *x,
  815. BEST_SEG_INFO *bsi, unsigned int segmentation)
  816. {
  817. int i;
  818. int const *labels;
  819. int br = 0;
  820. int bd = 0;
  821. B_PREDICTION_MODE this_mode;
  822. int label_count;
  823. int this_segment_rd = 0;
  824. int label_mv_thresh;
  825. int rate = 0;
  826. int sbr = 0;
  827. int sbd = 0;
  828. int segmentyrate = 0;
  829. vp8_variance_fn_ptr_t *v_fn_ptr;
  830. ENTROPY_CONTEXT_PLANES t_above, t_left;
  831. ENTROPY_CONTEXT *ta;
  832. ENTROPY_CONTEXT *tl;
  833. ENTROPY_CONTEXT_PLANES t_above_b, t_left_b;
  834. ENTROPY_CONTEXT *ta_b;
  835. ENTROPY_CONTEXT *tl_b;
  836. vpx_memcpy(&t_above, x->e_mbd.above_context, sizeof(ENTROPY_CONTEXT_PLANES));
  837. vpx_memcpy(&t_left, x->e_mbd.left_context, sizeof(ENTROPY_CONTEXT_PLANES));
  838. ta = (ENTROPY_CONTEXT *)&t_above;
  839. tl = (ENTROPY_CONTEXT *)&t_left;
  840. ta_b = (ENTROPY_CONTEXT *)&t_above_b;
  841. tl_b = (ENTROPY_CONTEXT *)&t_left_b;
  842. br = 0;
  843. bd = 0;
  844. v_fn_ptr = &cpi->fn_ptr[segmentation];
  845. labels = vp8_mbsplits[segmentation];
  846. label_count = vp8_mbsplit_count[segmentation];
  847. // 64 makes this threshold really big effectively
  848. // making it so that we very rarely check mvs on
  849. // segments. setting this to 1 would make mv thresh
  850. // roughly equal to what it is for macroblocks
  851. label_mv_thresh = 1 * bsi->mvthresh / label_count ;
  852. // Segmentation method overheads
  853. rate = vp8_cost_token(vp8_mbsplit_tree, vp8_mbsplit_probs, vp8_mbsplit_encodings + segmentation);
  854. rate += vp8_cost_mv_ref(SPLITMV, bsi->mdcounts);
  855. this_segment_rd += RDCOST(x->rdmult, x->rddiv, rate, 0);
  856. br += rate;
  857. for (i = 0; i < label_count; i++)
  858. {
  859. int_mv mode_mv[B_MODE_COUNT];
  860. int best_label_rd = INT_MAX;
  861. B_PREDICTION_MODE mode_selected = ZERO4X4;
  862. int bestlabelyrate = 0;
  863. // search for the best motion vector on this segment
  864. for (this_mode = LEFT4X4; this_mode <= NEW4X4 ; this_mode ++)
  865. {
  866. int this_rd;
  867. int distortion;
  868. int labelyrate;
  869. ENTROPY_CONTEXT_PLANES t_above_s, t_left_s;
  870. ENTROPY_CONTEXT *ta_s;
  871. ENTROPY_CONTEXT *tl_s;
  872. vpx_memcpy(&t_above_s, &t_above, sizeof(ENTROPY_CONTEXT_PLANES));
  873. vpx_memcpy(&t_left_s, &t_left, sizeof(ENTROPY_CONTEXT_PLANES));
  874. ta_s = (ENTROPY_CONTEXT *)&t_above_s;
  875. tl_s = (ENTROPY_CONTEXT *)&t_left_s;
  876. if (this_mode == NEW4X4)
  877. {
  878. int sseshift;
  879. int num00;
  880. int step_param = 0;
  881. int further_steps;
  882. int n;
  883. int thissme;
  884. int bestsme = INT_MAX;
  885. int_mv temp_mv;
  886. BLOCK *c;
  887. BLOCKD *e;
  888. // Is the best so far sufficiently good that we cant justify doing and new motion search.
  889. if (best_label_rd < label_mv_thresh)
  890. break;
  891. if(cpi->compressor_speed)
  892. {
  893. if (segmentation == BLOCK_8X16 || segmentation == BLOCK_16X8)
  894. {
  895. bsi->mvp.as_int = bsi->sv_mvp[i].as_int;
  896. if (i==1 && segmentation == BLOCK_16X8)
  897. bsi->mvp.as_int = bsi->sv_mvp[2].as_int;
  898. step_param = bsi->sv_istep[i];
  899. }
  900. // use previous block's result as next block's MV predictor.
  901. if (segmentation == BLOCK_4X4 && i>0)
  902. {
  903. bsi->mvp.as_int = x->e_mbd.block[i-1].bmi.mv.as_int;
  904. if (i==4 || i==8 || i==12)
  905. bsi->mvp.as_int = x->e_mbd.block[i-4].bmi.mv.as_int;
  906. step_param = 2;
  907. }
  908. }
  909. further_steps = (MAX_MVSEARCH_STEPS - 1) - step_param;
  910. {
  911. int sadpb = x->sadperbit4;
  912. int_mv mvp_full;
  913. mvp_full.as_mv.row = bsi->mvp.as_mv.row >>3;
  914. mvp_full.as_mv.col = bsi->mvp.as_mv.col >>3;
  915. // find first label
  916. n = vp8_mbsplit_offset[segmentation][i];
  917. c = &x->block[n];
  918. e = &x->e_mbd.block[n];
  919. {
  920. bestsme = cpi->diamond_search_sad(x, c, e, &mvp_full,
  921. &mode_mv[NEW4X4], step_param,
  922. sadpb, &num00, v_fn_ptr,
  923. x->mvcost, bsi->ref_mv);
  924. n = num00;
  925. num00 = 0;
  926. while (n < further_steps)
  927. {
  928. n++;
  929. if (num00)
  930. num00--;
  931. else
  932. {
  933. thissme = cpi->diamond_search_sad(x, c, e,
  934. &mvp_full, &temp_mv,
  935. step_param + n, sadpb,
  936. &num00, v_fn_ptr,
  937. x->mvcost, bsi->ref_mv);
  938. if (thissme < bestsme)
  939. {
  940. bestsme = thissme;
  941. mode_mv[NEW4X4].as_int = temp_mv.as_int;
  942. }
  943. }
  944. }
  945. }
  946. sseshift = segmentation_to_sseshift[segmentation];
  947. // Should we do a full search (best quality only)
  948. if ((cpi->compressor_speed == 0) && (bestsme >> sseshift) > 4000)
  949. {
  950. /* Check if mvp_full is within the range. */
  951. vp8_clamp_mv(&mvp_full, x->mv_col_min, x->mv_col_max, x->mv_row_min, x->mv_row_max);
  952. thissme = cpi->full_search_sad(x, c, e, &mvp_full,
  953. sadpb, 16, v_fn_ptr,
  954. x->mvcost, bsi->ref_mv);
  955. if (thissme < bestsme)
  956. {
  957. bestsme = thissme;
  958. mode_mv[NEW4X4].as_int = e->bmi.mv.as_int;
  959. }
  960. else
  961. {
  962. // The full search result is actually worse so re-instate the previous best vector
  963. e->bmi.mv.as_int = mode_mv[NEW4X4].as_int;
  964. }
  965. }
  966. }
  967. if (bestsme < INT_MAX)
  968. {
  969. int distortion;
  970. unsigned int sse;
  971. cpi->find_fractional_mv_step(x, c, e, &mode_mv[NEW4X4],
  972. bsi->ref_mv, x->errorperbit, v_fn_ptr, x->mvcost,
  973. &distortion, &sse);
  974. }
  975. } /* NEW4X4 */
  976. rate = labels2mode(x, labels, i, this_mode, &mode_mv[this_mode],
  977. bsi->ref_mv, x->mvcost);
  978. // Trap vectors that reach beyond the UMV borders
  979. if (((mode_mv[this_mode].as_mv.row >> 3) < x->mv_row_min) || ((mode_mv[this_mode].as_mv.row >> 3) > x->mv_row_max) ||
  980. ((mode_mv[this_mode].as_mv.col >> 3) < x->mv_col_min) || ((mode_mv[this_mode].as_mv.col >> 3) > x->mv_col_max))
  981. {
  982. continue;
  983. }
  984. distortion = vp8_encode_inter_mb_segment(x, labels, i, IF_RTCD(&cpi->rtcd.encodemb)) / 4;
  985. labelyrate = rdcost_mbsegment_y(x, labels, i, ta_s, tl_s);
  986. rate += labelyrate;
  987. this_rd = RDCOST(x->rdmult, x->rddiv, rate, distortion);
  988. if (this_rd < best_label_rd)
  989. {
  990. sbr = rate;
  991. sbd = distortion;
  992. bestlabelyrate = labelyrate;
  993. mode_selected = this_mode;
  994. best_label_rd = this_rd;
  995. vpx_memcpy(ta_b, ta_s, sizeof(ENTROPY_CONTEXT_PLANES));
  996. vpx_memcpy(tl_b, tl_s, sizeof(ENTROPY_CONTEXT_PLANES));
  997. }
  998. } /*for each 4x4 mode*/
  999. vpx_memcpy(ta, ta_b, sizeof(ENTROPY_CONTEXT_PLANES));
  1000. vpx_memcpy(tl, tl_b, sizeof(ENTROPY_CONTEXT_PLANES));
  1001. labels2mode(x, labels, i, mode_selected, &mode_mv[mode_selected],
  1002. bsi->ref_mv, x->mvcost);
  1003. br += sbr;
  1004. bd += sbd;
  1005. segmentyrate += bestlabelyrate;
  1006. this_segment_rd += best_label_rd;
  1007. if (this_segment_rd >= bsi->segment_rd)
  1008. break;
  1009. } /* for each label */
  1010. if (this_segment_rd < bsi->segment_rd)
  1011. {
  1012. bsi->r = br;
  1013. bsi->d = bd;
  1014. bsi->segment_yrate = segmentyrate;
  1015. bsi->segment_rd = this_segment_rd;
  1016. bsi->segment_num = segmentation;
  1017. // store everything needed to come back to this!!
  1018. for (i = 0; i < 16; i++)
  1019. {
  1020. BLOCKD *bd = &x->e_mbd.block[i];
  1021. bsi->mvs[i].as_mv = x->partition_info->bmi[i].mv.as_mv;
  1022. bsi->modes[i] = x->partition_info->bmi[i].mode;
  1023. bsi->eobs[i] = bd->eob;
  1024. }
  1025. }
  1026. }
  1027. static __inline
  1028. void vp8_cal_step_param(int sr, int *sp)
  1029. {
  1030. int step = 0;
  1031. if (sr > MAX_FIRST_STEP) sr = MAX_FIRST_STEP;
  1032. else if (sr < 1) sr = 1;
  1033. while (sr>>=1)
  1034. step++;
  1035. *sp = MAX_MVSEARCH_STEPS - 1 - step;
  1036. }
/* Pick the best macroblock partitioning (16x8, 8x16, 8x8 or 4x4) by
 * rate-distortion cost, trying each split via rd_check_segment() and
 * committing the winner's per-block motion vectors, modes and eobs to
 * the macroblock.
 *
 *   cpi, x          encoder instance and the macroblock being coded
 *   best_ref_mv     reference MV used as search centre / predictor
 *   best_rd         RD cost to beat; seeds bsi.segment_rd so worse
 *                   partitionings are abandoned early
 *   mdcounts        mode-decision counts, passed through in bsi
 *   returntotrate   out: total rate of the chosen partitioning
 *   returnyrate     out: luma-only rate of the chosen partitioning
 *   returndistortion out: distortion of the chosen partitioning
 *   mvthresh        motion-vector threshold forwarded in bsi
 *
 * Returns the RD cost of the best partitioning found (bsi.segment_rd;
 * still equal to best_rd if nothing beat it). */
static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
                                           int_mv *best_ref_mv, int best_rd,
                                           int *mdcounts, int *returntotrate,
                                           int *returnyrate, int *returndistortion,
                                           int mvthresh)
{
    int i;
    BEST_SEG_INFO bsi;

    vpx_memset(&bsi, 0, sizeof(bsi));

    /* Seed the shared search state for rd_check_segment(). */
    bsi.segment_rd = best_rd;
    bsi.ref_mv = best_ref_mv;
    bsi.mvp.as_int = best_ref_mv->as_int;
    bsi.mvthresh = mvthresh;
    bsi.mdcounts = mdcounts;

    for(i = 0; i < 16; i++)
    {
        bsi.modes[i] = ZERO4X4;
    }

    if(cpi->compressor_speed == 0)
    {
        /* for now, we will keep the original segmentation order
           when in best quality mode */
        rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
        rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
        rd_check_segment(cpi, x, &bsi, BLOCK_8X8);
        rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
    }
    else
    {
        int sr;

        /* Speed > 0: evaluate 8x8 first and reuse its MVs to steer the
           remaining partition searches. */
        rd_check_segment(cpi, x, &bsi, BLOCK_8X8);

        /* Only bother with the other partitionings if 8x8 improved on
           the incoming best_rd. */
        if (bsi.segment_rd < best_rd)
        {
            /* Full-pel search bounds centred on best_ref_mv.  The +1 when
               the MV has a fractional part (low 3 bits set) keeps the
               window valid after the >>3 truncation. */
            int col_min = (best_ref_mv->as_mv.col>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.col & 7)?1:0);
            int row_min = (best_ref_mv->as_mv.row>>3) - MAX_FULL_PEL_VAL + ((best_ref_mv->as_mv.row & 7)?1:0);
            int col_max = (best_ref_mv->as_mv.col>>3) + MAX_FULL_PEL_VAL;
            int row_max = (best_ref_mv->as_mv.row>>3) + MAX_FULL_PEL_VAL;

            /* Save the UMV window so it can be restored at the end. */
            int tmp_col_min = x->mv_col_min;
            int tmp_col_max = x->mv_col_max;
            int tmp_row_min = x->mv_row_min;
            int tmp_row_max = x->mv_row_max;

            /* Get intersection of UMV window and valid MV window to reduce # of checks in diamond search. */
            if (x->mv_col_min < col_min )
                x->mv_col_min = col_min;
            if (x->mv_col_max > col_max )
                x->mv_col_max = col_max;
            if (x->mv_row_min < row_min )
                x->mv_row_min = row_min;
            if (x->mv_row_max > row_max )
                x->mv_row_max = row_max;

            /* Get 8x8 result: one representative MV per 8x8 quadrant
               (4x4 blocks 0, 2, 8 and 10 are the quadrants' top-left
               corners in raster order). */
            bsi.sv_mvp[0].as_int = bsi.mvs[0].as_int;
            bsi.sv_mvp[1].as_int = bsi.mvs[2].as_int;
            bsi.sv_mvp[2].as_int = bsi.mvs[8].as_int;
            bsi.sv_mvp[3].as_int = bsi.mvs[10].as_int;

            /* Use 8x8 result as 16x8/8x16's predictor MV. Adjust search range according to the closeness of 2 MV. */
            /* block 8X16 */
            {
                /* Search range = largest full-pel row/col gap between the
                   two stacked quadrants of each half. */
                sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[2].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[2].as_mv.col))>>3);
                vp8_cal_step_param(sr, &bsi.sv_istep[0]);

                sr = MAXF((abs(bsi.sv_mvp[1].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[1].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
                vp8_cal_step_param(sr, &bsi.sv_istep[1]);

                rd_check_segment(cpi, x, &bsi, BLOCK_8X16);
            }

            /* block 16X8 */
            {
                sr = MAXF((abs(bsi.sv_mvp[0].as_mv.row - bsi.sv_mvp[1].as_mv.row))>>3, (abs(bsi.sv_mvp[0].as_mv.col - bsi.sv_mvp[1].as_mv.col))>>3);
                vp8_cal_step_param(sr, &bsi.sv_istep[0]);

                sr = MAXF((abs(bsi.sv_mvp[2].as_mv.row - bsi.sv_mvp[3].as_mv.row))>>3, (abs(bsi.sv_mvp[2].as_mv.col - bsi.sv_mvp[3].as_mv.col))>>3);
                vp8_cal_step_param(sr, &bsi.sv_istep[1]);

                rd_check_segment(cpi, x, &bsi, BLOCK_16X8);
            }

            /* If 8x8 is better than 16x8/8x16, then do 4x4 search */
            /* Not skip 4x4 if speed=0 (good quality) */
            if (cpi->sf.no_skip_block4x4_search || bsi.segment_num == BLOCK_8X8)  /* || (sv_segment_rd8x8-bsi.segment_rd) < sv_segment_rd8x8>>5) */
            {
                bsi.mvp.as_int = bsi.sv_mvp[0].as_int;
                rd_check_segment(cpi, x, &bsi, BLOCK_4X4);
            }

            /* restore UMV window */
            x->mv_col_min = tmp_col_min;
            x->mv_col_max = tmp_col_max;
            x->mv_row_min = tmp_row_min;
            x->mv_row_max = tmp_row_max;
        }
    }

    /* set it to the best */
    for (i = 0; i < 16; i++)
    {
        BLOCKD *bd = &x->e_mbd.block[i];

        bd->bmi.mv.as_int = bsi.mvs[i].as_int;
        bd->eob = bsi.eobs[i];
    }

    *returntotrate = bsi.r;
    *returndistortion = bsi.d;
    *returnyrate = bsi.segment_yrate;

    /* save partitions */
    x->e_mbd.mode_info_context->mbmi.partitioning = bsi.segment_num;
    x->partition_info->count = vp8_mbsplit_count[bsi.segment_num];

    for (i = 0; i < x->partition_info->count; i++)
    {
        int j;

        /* Map partition index back to the 4x4 block that anchors it. */
        j = vp8_mbsplit_offset[bsi.segment_num][i];

        x->partition_info->bmi[i].mode = bsi.modes[j];
        x->partition_info->bmi[i].mv.as_mv = bsi.mvs[j].as_mv;
    }

    /*
     * used to set x->e_mbd.mode_info_context->mbmi.mv.as_int
     */
    x->partition_info->bmi[15].mv.as_int = bsi.mvs[15].as_int;

    return bsi.segment_rd;
}
  1149. static void insertsortmv(int arr[], int len)
  1150. {
  1151. int i, j, k;
  1152. for ( i = 1 ; i <= len-1 ; i++ )
  1153. {
  1154. for ( j = 0 ; j < i ; j++ )
  1155. {
  1156. if ( arr[j] > arr[i] )
  1157. {
  1158. int temp;
  1159. temp = arr[i];
  1160. for ( k = i; k >j; k--)
  1161. arr[k] = arr[k - 1] ;
  1162. arr[j] = temp ;
  1163. }
  1164. }
  1165. }
  1166. }
  1167. static void insertsortsad(int arr[],int idx[], int len)
  1168. {
  1169. int i, j, k;
  1170. for ( i = 1 ; i <= len-1 ; i++ )
  1171. {
  1172. for ( j = 0 ; j < i ; j++ )
  1173. {
  1174. if ( arr[j] > arr[i] )
  1175. {
  1176. int temp, tempi;
  1177. temp = arr[i];
  1178. tempi = idx[i];
  1179. for ( k = i; k >j; k--)
  1180. {
  1181. arr[k] = arr[k - 1] ;
  1182. idx[k] = idx[k - 1];
  1183. }
  1184. arr[j] = temp ;
  1185. idx[j] = tempi;
  1186. }
  1187. }
  1188. }
  1189. }
//The improved MV prediction
/* Build a motion-vector predictor (*mvp) for the current macroblock from
 * up to 8 candidates: 3 spatial neighbours in the current frame (above,
 * left, above-left) and 5 co-located/adjacent MBs in the last frame.
 *
 * Candidate slots are POSITIONAL: vcnt is incremented whether or not a
 * neighbour contributed, so an intra neighbour leaves a zero MV in its
 * slot with near_ref[] still 0.  near_sadidx[] (from vp8_cal_sad) gives
 * the candidate slots in ascending SAD order.
 *
 *   cpi, xd              encoder instance / macroblock descriptor
 *   here                 MODE_INFO of the current MB
 *   mvp                  out: predicted MV, clamped via vp8_clamp_mv2()
 *   refframe             reference frame being searched
 *   ref_frame_sign_bias  per-reference sign-bias table for mv_bias()
 *   sr                   out: suggested search range hint — 3 if the match
 *                        came from a current-frame neighbour (slots 0-2),
 *                        2 for a last-frame neighbour, 0 when falling back
 *                        to the component-wise median (caller decides)
 *   near_sadidx          candidate slot indices sorted by SAD
 *
 * If the current MB is intra, mv stays 0 and *sr is left untouched. */
void vp8_mv_pred
(
    VP8_COMP *cpi,
    MACROBLOCKD *xd,
    const MODE_INFO *here,
    int_mv *mvp,
    int refframe,
    int *ref_frame_sign_bias,
    int *sr,
    int near_sadidx[]
)
{
    /* NOTE(review): above/left/aboveleft are formed by raw pointer
       arithmetic — presumably the mode_info array has a border so these
       are always valid; confirm against the allocator. */
    const MODE_INFO *above = here - xd->mode_info_stride;
    const MODE_INFO *left = here - 1;
    const MODE_INFO *aboveleft = above - 1;
    int_mv near_mvs[8];
    int near_ref[8];
    int_mv mv;
    int vcnt=0;
    int find=0;
    int mb_offset;

    int mvx[8];
    int mvy[8];
    int i;

    mv.as_int = 0;

    if(here->mbmi.ref_frame != INTRA_FRAME)
    {
        /* Clear all 8 candidate slots; unfilled slots read as zero MV
           with near_ref == 0. */
        near_mvs[0].as_int = near_mvs[1].as_int = near_mvs[2].as_int = near_mvs[3].as_int = near_mvs[4].as_int = near_mvs[5].as_int = near_mvs[6].as_int = near_mvs[7].as_int = 0;
        near_ref[0] = near_ref[1] = near_ref[2] = near_ref[3] = near_ref[4] = near_ref[5] = near_ref[6] = near_ref[7] = 0;

        // read in 3 nearby block's MVs from current frame as prediction candidates.
        if (above->mbmi.ref_frame != INTRA_FRAME)
        {
            near_mvs[vcnt].as_int = above->mbmi.mv.as_int;
            /* Flip the MV sign if the candidate's reference and refframe
               disagree on sign bias. */
            mv_bias(ref_frame_sign_bias[above->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
            near_ref[vcnt] = above->mbmi.ref_frame;
        }
        vcnt++;
        if (left->mbmi.ref_frame != INTRA_FRAME)
        {
            near_mvs[vcnt].as_int = left->mbmi.mv.as_int;
            mv_bias(ref_frame_sign_bias[left->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
            near_ref[vcnt] = left->mbmi.ref_frame;
        }
        vcnt++;
        if (aboveleft->mbmi.ref_frame != INTRA_FRAME)
        {
            near_mvs[vcnt].as_int = aboveleft->mbmi.mv.as_int;
            mv_bias(ref_frame_sign_bias[aboveleft->mbmi.ref_frame], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
            near_ref[vcnt] = aboveleft->mbmi.ref_frame;
        }
        vcnt++;

        // read in 5 nearby block's MVs from last frame.
        if(cpi->common.last_frame_type != KEY_FRAME)
        {
            /* mb_to_*_edge are in 1/8-pel units, so /128 (= 16px << 3)
               converts to MB rows/cols; the +1 offsets presumably skip a
               border row/column in the lfmv/lf_ref_frame arrays — confirm
               against where those arrays are filled. */
            mb_offset = (-xd->mb_to_top_edge/128 + 1) * (xd->mode_info_stride +1) + (-xd->mb_to_left_edge/128 +1) ;

            // current in last frame
            if (cpi->lf_ref_frame[mb_offset] != INTRA_FRAME)
            {
                near_mvs[vcnt].as_int = cpi->lfmv[mb_offset].as_int;
                mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
                near_ref[vcnt] = cpi->lf_ref_frame[mb_offset];
            }
            vcnt++;

            // above in last frame
            if (cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1] != INTRA_FRAME)
            {
                near_mvs[vcnt].as_int = cpi->lfmv[mb_offset - xd->mode_info_stride-1].as_int;
                mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset - xd->mode_info_stride-1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
                near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - xd->mode_info_stride-1];
            }
            vcnt++;

            // left in last frame
            if (cpi->lf_ref_frame[mb_offset-1] != INTRA_FRAME)
            {
                near_mvs[vcnt].as_int = cpi->lfmv[mb_offset -1].as_int;
                mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset -1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
                near_ref[vcnt] = cpi->lf_ref_frame[mb_offset - 1];
            }
            vcnt++;

            // right in last frame
            if (cpi->lf_ref_frame[mb_offset +1] != INTRA_FRAME)
            {
                near_mvs[vcnt].as_int = cpi->lfmv[mb_offset +1].as_int;
                mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
                near_ref[vcnt] = cpi->lf_ref_frame[mb_offset +1];
            }
            vcnt++;

            // below in last frame
            if (cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1] != INTRA_FRAME)
            {
                near_mvs[vcnt].as_int = cpi->lfmv[mb_offset + xd->mode_info_stride +1].as_int;
                mv_bias(cpi->lf_ref_frame_sign_bias[mb_offset + xd->mode_info_stride +1], refframe, &near_mvs[vcnt], ref_frame_sign_bias);
                near_ref[vcnt] = cpi->lf_ref_frame[mb_offset + xd->mode_info_stride +1];
            }
            vcnt++;
        }

        /* Walk the candidates in ascending-SAD order and take the first
           inter candidate that used the same reference frame. */
        for(i=0; i< vcnt; i++)
        {
            if(near_ref[near_sadidx[i]] != INTRA_FRAME)
            {
                if(here->mbmi.ref_frame == near_ref[near_sadidx[i]])
                {
                    mv.as_int = near_mvs[near_sadidx[i]].as_int;
                    find = 1;
                    /* Current-frame neighbours (first 3 SAD ranks) are
                       trusted more -> wider search range hint. */
                    if (i < 3)
                        *sr = 3;
                    else
                        *sr = 2;
                    break;
                }
            }
        }

        if(!find)
        {
            /* No same-reference candidate: fall back to the component-wise
               median of all slots (zeros included for unfilled slots).
               Note the naming is swapped: mvx holds row components and
               mvy holds column components. */
            for(i=0; i<vcnt; i++)
            {
                mvx[i] = near_mvs[i].as_mv.row;
                mvy[i] = near_mvs[i].as_mv.col;
            }

            insertsortmv(mvx, vcnt);
            insertsortmv(mvy, vcnt);
            mv.as_mv.row = mvx[vcnt/2];
            mv.as_mv.col = mvy[vcnt/2];

            find = 1;
            //sr is set to 0 to allow calling function to decide the search range.
            *sr = 0;
        }
    }

    /* Set up return values */
    mvp->as_int = mv.as_int;
    vp8_clamp_mv2(mvp, xd);
}
  1323. void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffset, int near_sadidx[])
  1324. {
  1325. int near_sad[8] = {0}; // 0-cf above, 1-cf left, 2-cf aboveleft, 3-lf current, 4-lf above, 5-lf left, 6-lf right, 7-lf below
  1326. BLOCK *b = &x->block[0];
  1327. unsigned char *src_y_ptr = *(b->base_src);
  1328. //calculate sad for current frame 3 nearby MBs.
  1329. if( xd->mb_to_top_edge==0 && xd->mb_to_left_edge ==0)
  1330. {
  1331. near_sad[0] = near_sad[1] = near_sad[2] = INT_MAX;
  1332. }else if(xd->mb_to_top_edge==0)
  1333. { //only has left MB for sad calculation.
  1334. near_sad[0] = near_sad[2] = INT_MAX;
  1335. near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
  1336. }else if(xd->mb_to_left_edge ==0)
  1337. { //only has left MB for sad calculation.
  1338. near_sad[1] = near_sad[2] = INT_MAX;
  1339. near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
  1340. }else
  1341. {
  1342. near_sad[0] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16,xd->dst.y_stride, 0x7fffffff);
  1343. near_sad[1] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - 16,xd->dst.y_stride, 0x7fffffff);
  1344. near_sad[2] = cpi->fn_ptr[BLOCK_16X16].sdf(src_y_ptr, b->src_stride, xd->dst.y_buffer - xd->dst.y_stride *16 -16,xd->dst.y_stride, 0x7fffffff);
  1345. }
  1346. if(cpi->common.last_frame_type != KEY_FRAME)
  1347. {
  1348. //calculate sad for last frame 5 nearby MBs.
  1349. unsigned char *pre_y_buffer = cpi->co