PageRenderTime 148ms CodeModel.GetById 21ms RepoModel.GetById 1ms app.codeStats 0ms

/xbmc/cores/dvdplayer/Codecs/ffmpeg/libavcodec/imgconvert.c

https://github.com/gusax/plex
C | 3002 lines | 2648 code | 236 blank | 118 comment | 297 complexity | b36766a71b94bfe60653f88ab89a21fb MD5 | raw file
Possible License(s): Unlicense, AGPL-1.0, GPL-2.0, LGPL-2.1, GPL-3.0

Large files are truncated, but you can click here to view the full file

  1. /*
  2. * Misc image conversion routines
  3. * Copyright (c) 2001, 2002, 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file libavcodec/imgconvert.c
  23. * misc image conversion routines
  24. */
  25. /* TODO:
  26. * - write 'ffimg' program to test all the image related stuff
  27. * - move all api to slice based system
  28. * - integrate deinterlacing, postprocessing and scaling in the conversion process
  29. */
  30. #include "avcodec.h"
  31. #include "dsputil.h"
  32. #include "colorspace.h"
  33. #if HAVE_MMX
  34. #include "x86/mmx.h"
  35. #include "x86/dsputil_mmx.h"
  36. #endif
  37. #define xglue(x, y) x ## y
  38. #define glue(x, y) xglue(x, y)
  39. #define FF_COLOR_RGB 0 /**< RGB color space */
  40. #define FF_COLOR_GRAY 1 /**< gray color space */
  41. #define FF_COLOR_YUV 2 /**< YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */
  42. #define FF_COLOR_YUV_JPEG 3 /**< YUV color space. 0 <= Y <= 255, 0 <= U, V <= 255 */
  43. #define FF_PIXEL_PLANAR 0 /**< each channel has one component in AVPicture */
  44. #define FF_PIXEL_PACKED 1 /**< only one components containing all the channels */
  45. #define FF_PIXEL_PALETTE 2 /**< one components containing indexes for a palette */
/* Static per-pixel-format description; one entry per PIX_FMT_* value is
 * kept in the pix_fmt_info[] table below.  Formats not listed in that
 * table have all-zero fields (in particular a NULL name). */
typedef struct PixFmtInfo {
    const char *name;           /**< canonical lowercase format name */
    uint8_t nb_channels;        /**< number of channels (including alpha) */
    uint8_t color_type;         /**< color type (see FF_COLOR_xxx constants) */
    uint8_t pixel_type;         /**< pixel storage type (see FF_PIXEL_xxx constants) */
    uint8_t is_alpha : 1;       /**< true if alpha can be specified */
    uint8_t is_hwaccel : 1;     /**< true if this is an HW accelerated format */
    uint8_t x_chroma_shift;     /**< X chroma subsampling factor is 2 ^ shift */
    uint8_t y_chroma_shift;     /**< Y chroma subsampling factor is 2 ^ shift */
    uint8_t depth;              /**< bit depth of the color components */
} PixFmtInfo;
  57. /* this table gives more information about formats */
  58. static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
  59. /* YUV formats */
  60. [PIX_FMT_YUV420P] = {
  61. .name = "yuv420p",
  62. .nb_channels = 3,
  63. .color_type = FF_COLOR_YUV,
  64. .pixel_type = FF_PIXEL_PLANAR,
  65. .depth = 8,
  66. .x_chroma_shift = 1, .y_chroma_shift = 1,
  67. },
  68. [PIX_FMT_YUV422P] = {
  69. .name = "yuv422p",
  70. .nb_channels = 3,
  71. .color_type = FF_COLOR_YUV,
  72. .pixel_type = FF_PIXEL_PLANAR,
  73. .depth = 8,
  74. .x_chroma_shift = 1, .y_chroma_shift = 0,
  75. },
  76. [PIX_FMT_YUV444P] = {
  77. .name = "yuv444p",
  78. .nb_channels = 3,
  79. .color_type = FF_COLOR_YUV,
  80. .pixel_type = FF_PIXEL_PLANAR,
  81. .depth = 8,
  82. .x_chroma_shift = 0, .y_chroma_shift = 0,
  83. },
  84. [PIX_FMT_YUYV422] = {
  85. .name = "yuyv422",
  86. .nb_channels = 1,
  87. .color_type = FF_COLOR_YUV,
  88. .pixel_type = FF_PIXEL_PACKED,
  89. .depth = 8,
  90. .x_chroma_shift = 1, .y_chroma_shift = 0,
  91. },
  92. [PIX_FMT_UYVY422] = {
  93. .name = "uyvy422",
  94. .nb_channels = 1,
  95. .color_type = FF_COLOR_YUV,
  96. .pixel_type = FF_PIXEL_PACKED,
  97. .depth = 8,
  98. .x_chroma_shift = 1, .y_chroma_shift = 0,
  99. },
  100. [PIX_FMT_YUV410P] = {
  101. .name = "yuv410p",
  102. .nb_channels = 3,
  103. .color_type = FF_COLOR_YUV,
  104. .pixel_type = FF_PIXEL_PLANAR,
  105. .depth = 8,
  106. .x_chroma_shift = 2, .y_chroma_shift = 2,
  107. },
  108. [PIX_FMT_YUV411P] = {
  109. .name = "yuv411p",
  110. .nb_channels = 3,
  111. .color_type = FF_COLOR_YUV,
  112. .pixel_type = FF_PIXEL_PLANAR,
  113. .depth = 8,
  114. .x_chroma_shift = 2, .y_chroma_shift = 0,
  115. },
  116. [PIX_FMT_YUV440P] = {
  117. .name = "yuv440p",
  118. .nb_channels = 3,
  119. .color_type = FF_COLOR_YUV,
  120. .pixel_type = FF_PIXEL_PLANAR,
  121. .depth = 8,
  122. .x_chroma_shift = 0, .y_chroma_shift = 1,
  123. },
  124. /* YUV formats with alpha plane */
  125. [PIX_FMT_YUVA420P] = {
  126. .name = "yuva420p",
  127. .nb_channels = 4,
  128. .color_type = FF_COLOR_YUV,
  129. .pixel_type = FF_PIXEL_PLANAR,
  130. .depth = 8,
  131. .x_chroma_shift = 1, .y_chroma_shift = 1,
  132. },
  133. /* JPEG YUV */
  134. [PIX_FMT_YUVJ420P] = {
  135. .name = "yuvj420p",
  136. .nb_channels = 3,
  137. .color_type = FF_COLOR_YUV_JPEG,
  138. .pixel_type = FF_PIXEL_PLANAR,
  139. .depth = 8,
  140. .x_chroma_shift = 1, .y_chroma_shift = 1,
  141. },
  142. [PIX_FMT_YUVJ422P] = {
  143. .name = "yuvj422p",
  144. .nb_channels = 3,
  145. .color_type = FF_COLOR_YUV_JPEG,
  146. .pixel_type = FF_PIXEL_PLANAR,
  147. .depth = 8,
  148. .x_chroma_shift = 1, .y_chroma_shift = 0,
  149. },
  150. [PIX_FMT_YUVJ444P] = {
  151. .name = "yuvj444p",
  152. .nb_channels = 3,
  153. .color_type = FF_COLOR_YUV_JPEG,
  154. .pixel_type = FF_PIXEL_PLANAR,
  155. .depth = 8,
  156. .x_chroma_shift = 0, .y_chroma_shift = 0,
  157. },
  158. [PIX_FMT_YUVJ440P] = {
  159. .name = "yuvj440p",
  160. .nb_channels = 3,
  161. .color_type = FF_COLOR_YUV_JPEG,
  162. .pixel_type = FF_PIXEL_PLANAR,
  163. .depth = 8,
  164. .x_chroma_shift = 0, .y_chroma_shift = 1,
  165. },
  166. /* RGB formats */
  167. [PIX_FMT_RGB24] = {
  168. .name = "rgb24",
  169. .nb_channels = 3,
  170. .color_type = FF_COLOR_RGB,
  171. .pixel_type = FF_PIXEL_PACKED,
  172. .depth = 8,
  173. .x_chroma_shift = 0, .y_chroma_shift = 0,
  174. },
  175. [PIX_FMT_BGR24] = {
  176. .name = "bgr24",
  177. .nb_channels = 3,
  178. .color_type = FF_COLOR_RGB,
  179. .pixel_type = FF_PIXEL_PACKED,
  180. .depth = 8,
  181. .x_chroma_shift = 0, .y_chroma_shift = 0,
  182. },
  183. [PIX_FMT_RGB32] = {
  184. .name = "rgb32",
  185. .nb_channels = 4, .is_alpha = 1,
  186. .color_type = FF_COLOR_RGB,
  187. .pixel_type = FF_PIXEL_PACKED,
  188. .depth = 8,
  189. .x_chroma_shift = 0, .y_chroma_shift = 0,
  190. },
  191. [PIX_FMT_RGB48BE] = {
  192. .name = "rgb48be",
  193. .nb_channels = 3,
  194. .color_type = FF_COLOR_RGB,
  195. .pixel_type = FF_PIXEL_PACKED,
  196. .depth = 16,
  197. .x_chroma_shift = 0, .y_chroma_shift = 0,
  198. },
  199. [PIX_FMT_RGB48LE] = {
  200. .name = "rgb48le",
  201. .nb_channels = 3,
  202. .color_type = FF_COLOR_RGB,
  203. .pixel_type = FF_PIXEL_PACKED,
  204. .depth = 16,
  205. .x_chroma_shift = 0, .y_chroma_shift = 0,
  206. },
  207. [PIX_FMT_RGB565] = {
  208. .name = "rgb565",
  209. .nb_channels = 3,
  210. .color_type = FF_COLOR_RGB,
  211. .pixel_type = FF_PIXEL_PACKED,
  212. .depth = 5,
  213. .x_chroma_shift = 0, .y_chroma_shift = 0,
  214. },
  215. [PIX_FMT_RGB555] = {
  216. .name = "rgb555",
  217. .nb_channels = 3,
  218. .color_type = FF_COLOR_RGB,
  219. .pixel_type = FF_PIXEL_PACKED,
  220. .depth = 5,
  221. .x_chroma_shift = 0, .y_chroma_shift = 0,
  222. },
  223. /* gray / mono formats */
  224. [PIX_FMT_GRAY16BE] = {
  225. .name = "gray16be",
  226. .nb_channels = 1,
  227. .color_type = FF_COLOR_GRAY,
  228. .pixel_type = FF_PIXEL_PLANAR,
  229. .depth = 16,
  230. },
  231. [PIX_FMT_GRAY16LE] = {
  232. .name = "gray16le",
  233. .nb_channels = 1,
  234. .color_type = FF_COLOR_GRAY,
  235. .pixel_type = FF_PIXEL_PLANAR,
  236. .depth = 16,
  237. },
  238. [PIX_FMT_GRAY8] = {
  239. .name = "gray",
  240. .nb_channels = 1,
  241. .color_type = FF_COLOR_GRAY,
  242. .pixel_type = FF_PIXEL_PLANAR,
  243. .depth = 8,
  244. },
  245. [PIX_FMT_MONOWHITE] = {
  246. .name = "monow",
  247. .nb_channels = 1,
  248. .color_type = FF_COLOR_GRAY,
  249. .pixel_type = FF_PIXEL_PLANAR,
  250. .depth = 1,
  251. },
  252. [PIX_FMT_MONOBLACK] = {
  253. .name = "monob",
  254. .nb_channels = 1,
  255. .color_type = FF_COLOR_GRAY,
  256. .pixel_type = FF_PIXEL_PLANAR,
  257. .depth = 1,
  258. },
  259. /* paletted formats */
  260. [PIX_FMT_PAL8] = {
  261. .name = "pal8",
  262. .nb_channels = 4, .is_alpha = 1,
  263. .color_type = FF_COLOR_RGB,
  264. .pixel_type = FF_PIXEL_PALETTE,
  265. .depth = 8,
  266. },
  267. [PIX_FMT_XVMC_MPEG2_MC] = {
  268. .name = "xvmcmc",
  269. .is_hwaccel = 1,
  270. },
  271. [PIX_FMT_XVMC_MPEG2_IDCT] = {
  272. .name = "xvmcidct",
  273. .is_hwaccel = 1,
  274. },
  275. [PIX_FMT_VDPAU_MPEG1] = {
  276. .name = "vdpau_mpeg1",
  277. .is_hwaccel = 1,
  278. },
  279. [PIX_FMT_VDPAU_MPEG2] = {
  280. .name = "vdpau_mpeg2",
  281. .is_hwaccel = 1,
  282. },
  283. [PIX_FMT_VDPAU_H264] = {
  284. .name = "vdpau_h264",
  285. .is_hwaccel = 1,
  286. },
  287. [PIX_FMT_VDPAU_WMV3] = {
  288. .name = "vdpau_wmv3",
  289. .is_hwaccel = 1,
  290. },
  291. [PIX_FMT_VDPAU_VC1] = {
  292. .name = "vdpau_vc1",
  293. .is_hwaccel = 1,
  294. },
  295. [PIX_FMT_UYYVYY411] = {
  296. .name = "uyyvyy411",
  297. .nb_channels = 1,
  298. .color_type = FF_COLOR_YUV,
  299. .pixel_type = FF_PIXEL_PACKED,
  300. .depth = 8,
  301. .x_chroma_shift = 2, .y_chroma_shift = 0,
  302. },
  303. [PIX_FMT_BGR32] = {
  304. .name = "bgr32",
  305. .nb_channels = 4, .is_alpha = 1,
  306. .color_type = FF_COLOR_RGB,
  307. .pixel_type = FF_PIXEL_PACKED,
  308. .depth = 8,
  309. .x_chroma_shift = 0, .y_chroma_shift = 0,
  310. },
  311. [PIX_FMT_BGR565] = {
  312. .name = "bgr565",
  313. .nb_channels = 3,
  314. .color_type = FF_COLOR_RGB,
  315. .pixel_type = FF_PIXEL_PACKED,
  316. .depth = 5,
  317. .x_chroma_shift = 0, .y_chroma_shift = 0,
  318. },
  319. [PIX_FMT_BGR555] = {
  320. .name = "bgr555",
  321. .nb_channels = 3,
  322. .color_type = FF_COLOR_RGB,
  323. .pixel_type = FF_PIXEL_PACKED,
  324. .depth = 5,
  325. .x_chroma_shift = 0, .y_chroma_shift = 0,
  326. },
  327. [PIX_FMT_RGB8] = {
  328. .name = "rgb8",
  329. .nb_channels = 1,
  330. .color_type = FF_COLOR_RGB,
  331. .pixel_type = FF_PIXEL_PACKED,
  332. .depth = 8,
  333. .x_chroma_shift = 0, .y_chroma_shift = 0,
  334. },
  335. [PIX_FMT_RGB4] = {
  336. .name = "rgb4",
  337. .nb_channels = 1,
  338. .color_type = FF_COLOR_RGB,
  339. .pixel_type = FF_PIXEL_PACKED,
  340. .depth = 4,
  341. .x_chroma_shift = 0, .y_chroma_shift = 0,
  342. },
  343. [PIX_FMT_RGB4_BYTE] = {
  344. .name = "rgb4_byte",
  345. .nb_channels = 1,
  346. .color_type = FF_COLOR_RGB,
  347. .pixel_type = FF_PIXEL_PACKED,
  348. .depth = 8,
  349. .x_chroma_shift = 0, .y_chroma_shift = 0,
  350. },
  351. [PIX_FMT_BGR8] = {
  352. .name = "bgr8",
  353. .nb_channels = 1,
  354. .color_type = FF_COLOR_RGB,
  355. .pixel_type = FF_PIXEL_PACKED,
  356. .depth = 8,
  357. .x_chroma_shift = 0, .y_chroma_shift = 0,
  358. },
  359. [PIX_FMT_BGR4] = {
  360. .name = "bgr4",
  361. .nb_channels = 1,
  362. .color_type = FF_COLOR_RGB,
  363. .pixel_type = FF_PIXEL_PACKED,
  364. .depth = 4,
  365. .x_chroma_shift = 0, .y_chroma_shift = 0,
  366. },
  367. [PIX_FMT_BGR4_BYTE] = {
  368. .name = "bgr4_byte",
  369. .nb_channels = 1,
  370. .color_type = FF_COLOR_RGB,
  371. .pixel_type = FF_PIXEL_PACKED,
  372. .depth = 8,
  373. .x_chroma_shift = 0, .y_chroma_shift = 0,
  374. },
  375. [PIX_FMT_NV12] = {
  376. .name = "nv12",
  377. .nb_channels = 2,
  378. .color_type = FF_COLOR_YUV,
  379. .pixel_type = FF_PIXEL_PLANAR,
  380. .depth = 8,
  381. .x_chroma_shift = 1, .y_chroma_shift = 1,
  382. },
  383. [PIX_FMT_NV21] = {
  384. .name = "nv12",
  385. .nb_channels = 2,
  386. .color_type = FF_COLOR_YUV,
  387. .pixel_type = FF_PIXEL_PLANAR,
  388. .depth = 8,
  389. .x_chroma_shift = 1, .y_chroma_shift = 1,
  390. },
  391. [PIX_FMT_BGR32_1] = {
  392. .name = "bgr32_1",
  393. .nb_channels = 4, .is_alpha = 1,
  394. .color_type = FF_COLOR_RGB,
  395. .pixel_type = FF_PIXEL_PACKED,
  396. .depth = 8,
  397. .x_chroma_shift = 0, .y_chroma_shift = 0,
  398. },
  399. [PIX_FMT_RGB32_1] = {
  400. .name = "rgb32_1",
  401. .nb_channels = 4, .is_alpha = 1,
  402. .color_type = FF_COLOR_RGB,
  403. .pixel_type = FF_PIXEL_PACKED,
  404. .depth = 8,
  405. .x_chroma_shift = 0, .y_chroma_shift = 0,
  406. },
  407. /* VA API formats */
  408. [PIX_FMT_VAAPI_MOCO] = {
  409. .name = "vaapi_moco",
  410. .is_hwaccel = 1,
  411. },
  412. [PIX_FMT_VAAPI_IDCT] = {
  413. .name = "vaapi_idct",
  414. .is_hwaccel = 1,
  415. },
  416. [PIX_FMT_VAAPI_VLD] = {
  417. .name = "vaapi_vld",
  418. .is_hwaccel = 1,
  419. },
  420. };
  421. void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift)
  422. {
  423. *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift;
  424. *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift;
  425. }
  426. const char *avcodec_get_pix_fmt_name(int pix_fmt)
  427. {
  428. if (pix_fmt < 0 || pix_fmt >= PIX_FMT_NB)
  429. return NULL;
  430. else
  431. return pix_fmt_info[pix_fmt].name;
  432. }
  433. enum PixelFormat avcodec_get_pix_fmt(const char* name)
  434. {
  435. int i;
  436. for (i=0; i < PIX_FMT_NB; i++)
  437. if (!strcmp(pix_fmt_info[i].name, name))
  438. return i;
  439. return PIX_FMT_NONE;
  440. }
  441. void avcodec_pix_fmt_string (char *buf, int buf_size, int pix_fmt)
  442. {
  443. /* print header */
  444. if (pix_fmt < 0)
  445. snprintf (buf, buf_size,
  446. "name " " nb_channels" " depth" " is_alpha"
  447. );
  448. else{
  449. PixFmtInfo info= pix_fmt_info[pix_fmt];
  450. char is_alpha_char= info.is_alpha ? 'y' : 'n';
  451. snprintf (buf, buf_size,
  452. "%-10s" " %1d " " %2d " " %c ",
  453. info.name,
  454. info.nb_channels,
  455. info.depth,
  456. is_alpha_char
  457. );
  458. }
  459. }
  460. int ff_is_hwaccel_pix_fmt(enum PixelFormat pix_fmt)
  461. {
  462. return pix_fmt_info[pix_fmt].is_hwaccel;
  463. }
  464. int ff_set_systematic_pal(uint32_t pal[256], enum PixelFormat pix_fmt){
  465. int i;
  466. for(i=0; i<256; i++){
  467. int r,g,b;
  468. switch(pix_fmt) {
  469. case PIX_FMT_RGB8:
  470. r= (i>>5 )*36;
  471. g= ((i>>2)&7)*36;
  472. b= (i&3 )*85;
  473. break;
  474. case PIX_FMT_BGR8:
  475. b= (i>>6 )*85;
  476. g= ((i>>3)&7)*36;
  477. r= (i&7 )*36;
  478. break;
  479. case PIX_FMT_RGB4_BYTE:
  480. r= (i>>3 )*255;
  481. g= ((i>>1)&3)*85;
  482. b= (i&1 )*255;
  483. break;
  484. case PIX_FMT_BGR4_BYTE:
  485. b= (i>>3 )*255;
  486. g= ((i>>1)&3)*85;
  487. r= (i&1 )*255;
  488. break;
  489. case PIX_FMT_GRAY8:
  490. r=b=g= i;
  491. break;
  492. default:
  493. return -1;
  494. }
  495. pal[i] = b + (g<<8) + (r<<16);
  496. }
  497. return 0;
  498. }
/* Fill picture->linesize[] (bytes per row, per plane) for the given pixel
 * format and width.  Strides are not padded or aligned in any way.
 * Returns 0 on success, -1 for a format not handled by the switch. */
int ff_fill_linesize(AVPicture *picture, int pix_fmt, int width)
{
    int w2;
    const PixFmtInfo *pinfo;
    memset(picture->linesize, 0, sizeof(picture->linesize));
    pinfo = &pix_fmt_info[pix_fmt];
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVJ440P:
        /* chroma row width, rounded up to the subsampling grid */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        break;
    case PIX_FMT_YUVA420P:
        /* as above, plus a full-width alpha plane */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        picture->linesize[2] = w2;
        picture->linesize[3] = width;
        break;
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        /* NOTE(review): the chroma plane of NV12/NV21 holds interleaved
         * Cb/Cr, so a stride of w2 looks like half the expected 2*w2
         * bytes per row — confirm against the callers before changing. */
        w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift;
        picture->linesize[0] = width;
        picture->linesize[1] = w2;
        break;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        picture->linesize[0] = width * 3;
        break;
    case PIX_FMT_RGB32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
        picture->linesize[0] = width * 4;
        break;
    case PIX_FMT_RGB48BE:
    case PIX_FMT_RGB48LE:
        /* 3 components x 16 bits */
        picture->linesize[0] = width * 6;
        break;
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUYV422:
        picture->linesize[0] = width * 2;
        break;
    case PIX_FMT_UYVY422:
        picture->linesize[0] = width * 2;
        break;
    case PIX_FMT_UYYVYY411:
        /* 12 bits per pixel, packed */
        picture->linesize[0] = width + width/2;
        break;
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
        /* two pixels per byte; odd trailing pixel is dropped here */
        picture->linesize[0] = width / 2;
        break;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* one bit per pixel, rounded up to whole bytes */
        picture->linesize[0] = (width + 7) >> 3;
        break;
    case PIX_FMT_PAL8:
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        picture->linesize[0] = width;
        picture->linesize[1] = 4;   /* palette: 256 entries x 4 bytes */
        break;
    default:
        return -1;
    }
    return 0;
}
/* Point picture->data[] into the flat buffer ptr according to the strides
 * previously stored in picture->linesize[] (see ff_fill_linesize), and
 * return the total number of bytes needed, or -1 for an unknown format.
 * Passing ptr == NULL is valid for pure size computation (avpicture_get_size
 * relies on that). */
int ff_fill_pointer(AVPicture *picture, uint8_t *ptr, int pix_fmt,
                    int height)
{
    int size, h2, size2;
    const PixFmtInfo *pinfo;
    pinfo = &pix_fmt_info[pix_fmt];
    size = picture->linesize[0] * height;   /* luma / packed plane bytes */
    switch(pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVJ440P:
        /* chroma plane height, rounded up to the subsampling grid */
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = picture->linesize[1] * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->data[3] = NULL;
        return size + 2 * size2;            /* Y + Cb + Cr */
    case PIX_FMT_YUVA420P:
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        size2 = picture->linesize[1] * h2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size2;
        picture->data[3] = picture->data[1] + size2 + size2;  /* alpha plane */
        return 2 * size + 2 * size2;        /* Y + Cb + Cr + A */
    case PIX_FMT_NV12:
    case PIX_FMT_NV21:
        h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift;
        /* NOTE(review): size2 already doubles linesize[1] for the
         * interleaved Cb/Cr plane, yet the return still multiplies it by
         * 2 again — this looks like it over-reports the needed size by
         * one chroma plane; confirm before changing. */
        size2 = picture->linesize[1] * h2 * 2;
        picture->data[0] = ptr;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size + 2 * size2;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
    case PIX_FMT_RGB32:
    case PIX_FMT_BGR32:
    case PIX_FMT_RGB32_1:
    case PIX_FMT_BGR32_1:
    case PIX_FMT_RGB48BE:
    case PIX_FMT_RGB48LE:
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_BGR555:
    case PIX_FMT_BGR565:
    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
    case PIX_FMT_YUYV422:
    case PIX_FMT_UYVY422:
    case PIX_FMT_UYYVYY411:
    case PIX_FMT_RGB4:
    case PIX_FMT_BGR4:
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        /* single packed plane */
        picture->data[0] = ptr;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size;
    case PIX_FMT_PAL8:
    case PIX_FMT_RGB8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB4_BYTE:
    case PIX_FMT_BGR4_BYTE:
    case PIX_FMT_GRAY8:
        /* index plane, rounded up to 4 bytes, followed by the palette */
        size2 = (size + 3) & ~3;
        picture->data[0] = ptr;
        picture->data[1] = ptr + size2; /* palette is stored here as 256 32 bit words */
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return size2 + 256 * 4;
    default:
        picture->data[0] = NULL;
        picture->data[1] = NULL;
        picture->data[2] = NULL;
        picture->data[3] = NULL;
        return -1;
    }
}
  673. int avpicture_fill(AVPicture *picture, uint8_t *ptr,
  674. int pix_fmt, int width, int height)
  675. {
  676. if(avcodec_check_dimensions(NULL, width, height))
  677. return -1;
  678. if (ff_fill_linesize(picture, pix_fmt, width))
  679. return -1;
  680. return ff_fill_pointer(picture, ptr, pix_fmt, height);
  681. }
/* Copy the planes of src into the flat buffer dest, row by row with no
 * padding between rows, and return the number of bytes written (the
 * avpicture_get_size() of the format).  Returns -1 if dest is too small
 * or the size cannot be computed. */
int avpicture_layout(const AVPicture* src, int pix_fmt, int width, int height,
                     unsigned char *dest, int dest_size)
{
    const PixFmtInfo* pf = &pix_fmt_info[pix_fmt];
    int i, j, w, ow, h, oh, data_planes;
    const unsigned char* s;
    int size = avpicture_get_size(pix_fmt, width, height);
    if (size > dest_size || size < 0)
        return -1;
    if (pf->pixel_type == FF_PIXEL_PACKED || pf->pixel_type == FF_PIXEL_PALETTE) {
        /* packed/paletted: one plane; row width depends on the format */
        if (pix_fmt == PIX_FMT_YUYV422 ||
            pix_fmt == PIX_FMT_UYVY422 ||
            pix_fmt == PIX_FMT_BGR565 ||
            pix_fmt == PIX_FMT_BGR555 ||
            pix_fmt == PIX_FMT_RGB565 ||
            pix_fmt == PIX_FMT_RGB555)
            w = width * 2;
        else if (pix_fmt == PIX_FMT_UYYVYY411)
            w = width + width/2;
        else if (pix_fmt == PIX_FMT_PAL8)
            w = width;
        else
            w = width * (pf->depth * pf->nb_channels / 8);
        data_planes = 1;
        h = height;
    } else {
        /* planar: one pass per channel */
        data_planes = pf->nb_channels;
        w = (width*pf->depth + 7)/8;
        h = height;
    }
    /* remember the full-resolution row width/height for the alpha plane */
    ow = w;
    oh = h;
    for (i=0; i<data_planes; i++) {
        if (i == 1) {
            /* planes 1 and 2 are subsampled chroma */
            w = width >> pf->x_chroma_shift;
            h = height >> pf->y_chroma_shift;
        } else if (i == 3) {
            /* plane 3 (alpha) is full resolution again */
            w = ow;
            h = oh;
        }
        s = src->data[i];
        for(j=0; j<h; j++) {
            memcpy(dest, s, w);
            dest += w;
            s += src->linesize[i];
        }
    }
    /* paletted formats append the 256x4-byte palette, 4-byte aligned */
    if (pf->pixel_type == FF_PIXEL_PALETTE)
        memcpy((unsigned char *)(((size_t)dest + 3) & ~3), src->data[1], 256 * 4);
    return size;
}
  733. int avpicture_get_size(int pix_fmt, int width, int height)
  734. {
  735. AVPicture dummy_pict;
  736. return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height);
  737. }
  738. int avcodec_get_pix_fmt_loss(int dst_pix_fmt, int src_pix_fmt,
  739. int has_alpha)
  740. {
  741. const PixFmtInfo *pf, *ps;
  742. int loss;
  743. ps = &pix_fmt_info[src_pix_fmt];
  744. pf = &pix_fmt_info[dst_pix_fmt];
  745. /* compute loss */
  746. loss = 0;
  747. pf = &pix_fmt_info[dst_pix_fmt];
  748. if (pf->depth < ps->depth ||
  749. (dst_pix_fmt == PIX_FMT_RGB555 && src_pix_fmt == PIX_FMT_RGB565))
  750. loss |= FF_LOSS_DEPTH;
  751. if (pf->x_chroma_shift > ps->x_chroma_shift ||
  752. pf->y_chroma_shift > ps->y_chroma_shift)
  753. loss |= FF_LOSS_RESOLUTION;
  754. switch(pf->color_type) {
  755. case FF_COLOR_RGB:
  756. if (ps->color_type != FF_COLOR_RGB &&
  757. ps->color_type != FF_COLOR_GRAY)
  758. loss |= FF_LOSS_COLORSPACE;
  759. break;
  760. case FF_COLOR_GRAY:
  761. if (ps->color_type != FF_COLOR_GRAY)
  762. loss |= FF_LOSS_COLORSPACE;
  763. break;
  764. case FF_COLOR_YUV:
  765. if (ps->color_type != FF_COLOR_YUV)
  766. loss |= FF_LOSS_COLORSPACE;
  767. break;
  768. case FF_COLOR_YUV_JPEG:
  769. if (ps->color_type != FF_COLOR_YUV_JPEG &&
  770. ps->color_type != FF_COLOR_YUV &&
  771. ps->color_type != FF_COLOR_GRAY)
  772. loss |= FF_LOSS_COLORSPACE;
  773. break;
  774. default:
  775. /* fail safe test */
  776. if (ps->color_type != pf->color_type)
  777. loss |= FF_LOSS_COLORSPACE;
  778. break;
  779. }
  780. if (pf->color_type == FF_COLOR_GRAY &&
  781. ps->color_type != FF_COLOR_GRAY)
  782. loss |= FF_LOSS_CHROMA;
  783. if (!pf->is_alpha && (ps->is_alpha && has_alpha))
  784. loss |= FF_LOSS_ALPHA;
  785. if (pf->pixel_type == FF_PIXEL_PALETTE &&
  786. (ps->pixel_type != FF_PIXEL_PALETTE && ps->color_type != FF_COLOR_GRAY))
  787. loss |= FF_LOSS_COLORQUANT;
  788. return loss;
  789. }
  790. static int avg_bits_per_pixel(int pix_fmt)
  791. {
  792. int bits;
  793. const PixFmtInfo *pf;
  794. pf = &pix_fmt_info[pix_fmt];
  795. switch(pf->pixel_type) {
  796. case FF_PIXEL_PACKED:
  797. switch(pix_fmt) {
  798. case PIX_FMT_YUYV422:
  799. case PIX_FMT_UYVY422:
  800. case PIX_FMT_RGB565:
  801. case PIX_FMT_RGB555:
  802. case PIX_FMT_BGR565:
  803. case PIX_FMT_BGR555:
  804. bits = 16;
  805. break;
  806. case PIX_FMT_UYYVYY411:
  807. bits = 12;
  808. break;
  809. default:
  810. bits = pf->depth * pf->nb_channels;
  811. break;
  812. }
  813. break;
  814. case FF_PIXEL_PLANAR:
  815. if (pf->x_chroma_shift == 0 && pf->y_chroma_shift == 0) {
  816. bits = pf->depth * pf->nb_channels;
  817. } else {
  818. bits = pf->depth + ((2 * pf->depth) >>
  819. (pf->x_chroma_shift + pf->y_chroma_shift));
  820. }
  821. break;
  822. case FF_PIXEL_PALETTE:
  823. bits = 8;
  824. break;
  825. default:
  826. bits = -1;
  827. break;
  828. }
  829. return bits;
  830. }
  831. static int avcodec_find_best_pix_fmt1(int64_t pix_fmt_mask,
  832. int src_pix_fmt,
  833. int has_alpha,
  834. int loss_mask)
  835. {
  836. int dist, i, loss, min_dist, dst_pix_fmt;
  837. /* find exact color match with smallest size */
  838. dst_pix_fmt = -1;
  839. min_dist = 0x7fffffff;
  840. for(i = 0;i < PIX_FMT_NB; i++) {
  841. if (pix_fmt_mask & (1ULL << i)) {
  842. loss = avcodec_get_pix_fmt_loss(i, src_pix_fmt, has_alpha) & loss_mask;
  843. if (loss == 0) {
  844. dist = avg_bits_per_pixel(i);
  845. if (dist < min_dist) {
  846. min_dist = dist;
  847. dst_pix_fmt = i;
  848. }
  849. }
  850. }
  851. }
  852. return dst_pix_fmt;
  853. }
  854. int avcodec_find_best_pix_fmt(int64_t pix_fmt_mask, int src_pix_fmt,
  855. int has_alpha, int *loss_ptr)
  856. {
  857. int dst_pix_fmt, loss_mask, i;
  858. static const int loss_mask_order[] = {
  859. ~0, /* no loss first */
  860. ~FF_LOSS_ALPHA,
  861. ~FF_LOSS_RESOLUTION,
  862. ~(FF_LOSS_COLORSPACE | FF_LOSS_RESOLUTION),
  863. ~FF_LOSS_COLORQUANT,
  864. ~FF_LOSS_DEPTH,
  865. 0,
  866. };
  867. /* try with successive loss */
  868. i = 0;
  869. for(;;) {
  870. loss_mask = loss_mask_order[i++];
  871. dst_pix_fmt = avcodec_find_best_pix_fmt1(pix_fmt_mask, src_pix_fmt,
  872. has_alpha, loss_mask);
  873. if (dst_pix_fmt >= 0)
  874. goto found;
  875. if (loss_mask == 0)
  876. break;
  877. }
  878. return -1;
  879. found:
  880. if (loss_ptr)
  881. *loss_ptr = avcodec_get_pix_fmt_loss(dst_pix_fmt, src_pix_fmt, has_alpha);
  882. return dst_pix_fmt;
  883. }
  884. void ff_img_copy_plane(uint8_t *dst, int dst_wrap,
  885. const uint8_t *src, int src_wrap,
  886. int width, int height)
  887. {
  888. if((!dst) || (!src))
  889. return;
  890. for(;height > 0; height--) {
  891. memcpy(dst, src, width);
  892. dst += dst_wrap;
  893. src += src_wrap;
  894. }
  895. }
  896. int ff_get_plane_bytewidth(enum PixelFormat pix_fmt, int width, int plane)
  897. {
  898. int bits;
  899. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  900. pf = &pix_fmt_info[pix_fmt];
  901. switch(pf->pixel_type) {
  902. case FF_PIXEL_PACKED:
  903. switch(pix_fmt) {
  904. case PIX_FMT_YUYV422:
  905. case PIX_FMT_UYVY422:
  906. case PIX_FMT_RGB565:
  907. case PIX_FMT_RGB555:
  908. case PIX_FMT_BGR565:
  909. case PIX_FMT_BGR555:
  910. bits = 16;
  911. break;
  912. case PIX_FMT_UYYVYY411:
  913. bits = 12;
  914. break;
  915. default:
  916. bits = pf->depth * pf->nb_channels;
  917. break;
  918. }
  919. return (width * bits + 7) >> 3;
  920. break;
  921. case FF_PIXEL_PLANAR:
  922. if (plane == 1 || plane == 2)
  923. width= -((-width)>>pf->x_chroma_shift);
  924. return (width * pf->depth + 7) >> 3;
  925. break;
  926. case FF_PIXEL_PALETTE:
  927. if (plane == 0)
  928. return width;
  929. break;
  930. }
  931. return -1;
  932. }
  933. void av_picture_copy(AVPicture *dst, const AVPicture *src,
  934. int pix_fmt, int width, int height)
  935. {
  936. int i;
  937. const PixFmtInfo *pf = &pix_fmt_info[pix_fmt];
  938. pf = &pix_fmt_info[pix_fmt];
  939. switch(pf->pixel_type) {
  940. case FF_PIXEL_PACKED:
  941. case FF_PIXEL_PLANAR:
  942. for(i = 0; i < pf->nb_channels; i++) {
  943. int h;
  944. int bwidth = ff_get_plane_bytewidth(pix_fmt, width, i);
  945. h = height;
  946. if (i == 1 || i == 2) {
  947. h= -((-height)>>pf->y_chroma_shift);
  948. }
  949. ff_img_copy_plane(dst->data[i], dst->linesize[i],
  950. src->data[i], src->linesize[i],
  951. bwidth, h);
  952. }
  953. break;
  954. case FF_PIXEL_PALETTE:
  955. ff_img_copy_plane(dst->data[0], dst->linesize[0],
  956. src->data[0], src->linesize[0],
  957. width, height);
  958. /* copy the palette */
  959. ff_img_copy_plane(dst->data[1], dst->linesize[1],
  960. src->data[1], src->linesize[1],
  961. 4, 256);
  962. break;
  963. }
  964. }
/* XXX: totally non optimized */
/* Convert packed YUYV 4:2:2 to planar YUV 4:2:0.  Chroma is taken from
 * the even rows only (no vertical averaging).  An odd trailing column
 * stores its luma and chroma samples; an odd final row is processed
 * without a paired luma-only row. */
static void yuyv422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;
    p1 = src->data[0];      /* walks packed source rows */
    lum1 = dst->data[0];    /* walk destination plane rows */
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: demux luma and the (horizontally shared) chroma */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[0];
            cb[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd trailing pixel */
        if (w) {
            lum[0] = p[0];
            cb[0] = p[1];
            cr[0] = p[3];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd row: luma only, its chroma samples are discarded */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[0];
                lum[1] = p[2];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[0];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* one chroma row per pair of luma rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
/* Convert packed UYVY 4:2:2 to planar YUV 4:2:0.  Identical structure to
 * yuyv422_to_yuv420p above, but with the UYVY byte order (chroma first):
 * U Y0 V Y1.  Chroma comes from even rows only; odd trailing columns and
 * an odd final row are handled explicitly. */
static void uyvy422_to_yuv420p(AVPicture *dst, const AVPicture *src,
                               int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;
    p1 = src->data[0];      /* walks packed source rows */
    lum1 = dst->data[0];    /* walk destination plane rows */
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height >= 1; height -= 2) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        /* even row: demux luma and chroma (U before Y, V before Y) */
        for(w = width; w >= 2; w -= 2) {
            lum[0] = p[1];
            cb[0] = p[0];
            lum[1] = p[3];
            cr[0] = p[2];
            p += 4;
            lum += 2;
            cb++;
            cr++;
        }
        /* odd trailing pixel */
        if (w) {
            lum[0] = p[1];
            cb[0] = p[0];
            cr[0] = p[2];
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        if (height>1) {
            /* odd row: luma only, its chroma samples are discarded */
            p = p1;
            lum = lum1;
            for(w = width; w >= 2; w -= 2) {
                lum[0] = p[1];
                lum[1] = p[3];
                p += 4;
                lum += 2;
            }
            if (w) {
                lum[0] = p[1];
            }
            p1 += src->linesize[0];
            lum1 += dst->linesize[0];
        }
        /* one chroma row per pair of luma rows */
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  1072. static void uyvy422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1073. int width, int height)
  1074. {
  1075. const uint8_t *p, *p1;
  1076. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1077. int w;
  1078. p1 = src->data[0];
  1079. lum1 = dst->data[0];
  1080. cb1 = dst->data[1];
  1081. cr1 = dst->data[2];
  1082. for(;height > 0; height--) {
  1083. p = p1;
  1084. lum = lum1;
  1085. cb = cb1;
  1086. cr = cr1;
  1087. for(w = width; w >= 2; w -= 2) {
  1088. lum[0] = p[1];
  1089. cb[0] = p[0];
  1090. lum[1] = p[3];
  1091. cr[0] = p[2];
  1092. p += 4;
  1093. lum += 2;
  1094. cb++;
  1095. cr++;
  1096. }
  1097. p1 += src->linesize[0];
  1098. lum1 += dst->linesize[0];
  1099. cb1 += dst->linesize[1];
  1100. cr1 += dst->linesize[2];
  1101. }
  1102. }
  1103. static void yuyv422_to_yuv422p(AVPicture *dst, const AVPicture *src,
  1104. int width, int height)
  1105. {
  1106. const uint8_t *p, *p1;
  1107. uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1108. int w;
  1109. p1 = src->data[0];
  1110. lum1 = dst->data[0];
  1111. cb1 = dst->data[1];
  1112. cr1 = dst->data[2];
  1113. for(;height > 0; height--) {
  1114. p = p1;
  1115. lum = lum1;
  1116. cb = cb1;
  1117. cr = cr1;
  1118. for(w = width; w >= 2; w -= 2) {
  1119. lum[0] = p[0];
  1120. cb[0] = p[1];
  1121. lum[1] = p[2];
  1122. cr[0] = p[3];
  1123. p += 4;
  1124. lum += 2;
  1125. cb++;
  1126. cr++;
  1127. }
  1128. p1 += src->linesize[0];
  1129. lum1 += dst->linesize[0];
  1130. cb1 += dst->linesize[1];
  1131. cr1 += dst->linesize[2];
  1132. }
  1133. }
  1134. static void yuv422p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1135. int width, int height)
  1136. {
  1137. uint8_t *p, *p1;
  1138. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1139. int w;
  1140. p1 = dst->data[0];
  1141. lum1 = src->data[0];
  1142. cb1 = src->data[1];
  1143. cr1 = src->data[2];
  1144. for(;height > 0; height--) {
  1145. p = p1;
  1146. lum = lum1;
  1147. cb = cb1;
  1148. cr = cr1;
  1149. for(w = width; w >= 2; w -= 2) {
  1150. p[0] = lum[0];
  1151. p[1] = cb[0];
  1152. p[2] = lum[1];
  1153. p[3] = cr[0];
  1154. p += 4;
  1155. lum += 2;
  1156. cb++;
  1157. cr++;
  1158. }
  1159. p1 += dst->linesize[0];
  1160. lum1 += src->linesize[0];
  1161. cb1 += src->linesize[1];
  1162. cr1 += src->linesize[2];
  1163. }
  1164. }
  1165. static void yuv422p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1166. int width, int height)
  1167. {
  1168. uint8_t *p, *p1;
  1169. const uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
  1170. int w;
  1171. p1 = dst->data[0];
  1172. lum1 = src->data[0];
  1173. cb1 = src->data[1];
  1174. cr1 = src->data[2];
  1175. for(;height > 0; height--) {
  1176. p = p1;
  1177. lum = lum1;
  1178. cb = cb1;
  1179. cr = cr1;
  1180. for(w = width; w >= 2; w -= 2) {
  1181. p[1] = lum[0];
  1182. p[0] = cb[0];
  1183. p[3] = lum[1];
  1184. p[2] = cr[0];
  1185. p += 4;
  1186. lum += 2;
  1187. cb++;
  1188. cr++;
  1189. }
  1190. p1 += dst->linesize[0];
  1191. lum1 += src->linesize[0];
  1192. cb1 += src->linesize[1];
  1193. cr1 += src->linesize[2];
  1194. }
  1195. }
/* Convert packed UYYVYY411 (6 bytes carry 4 pixels: U Y0 Y1 V Y2 Y3)
 * to planar YUV411P.  Pixels are handled four at a time; a trailing
 * remainder of 1-3 pixels per row is not converted. */
static void uyyvyy411_to_yuv411p(AVPicture *dst, const AVPicture *src,
                                 int width, int height)
{
    const uint8_t *p, *p1;
    uint8_t *lum, *cr, *cb, *lum1, *cr1, *cb1;
    int w;

    p1 = src->data[0];
    lum1 = dst->data[0];
    cb1 = dst->data[1];
    cr1 = dst->data[2];
    for(;height > 0; height--) {
        p = p1;
        lum = lum1;
        cb = cb1;
        cr = cr1;
        for(w = width; w >= 4; w -= 4) {
            cb[0] = p[0];
            lum[0] = p[1];
            lum[1] = p[2];
            cr[0] = p[3];
            lum[2] = p[4];
            lum[3] = p[5];
            p += 6;
            lum += 4;
            cb++;
            cr++;
        }
        p1 += src->linesize[0];
        lum1 += dst->linesize[0];
        cb1 += dst->linesize[1];
        cr1 += dst->linesize[2];
    }
}
  1229. static void yuv420p_to_yuyv422(AVPicture *dst, const AVPicture *src,
  1230. int width, int height)
  1231. {
  1232. int w, h;
  1233. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1234. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1235. uint8_t *cb1, *cb2 = src->data[1];
  1236. uint8_t *cr1, *cr2 = src->data[2];
  1237. for(h = height / 2; h--;) {
  1238. line1 = linesrc;
  1239. line2 = linesrc + dst->linesize[0];
  1240. lum1 = lumsrc;
  1241. lum2 = lumsrc + src->linesize[0];
  1242. cb1 = cb2;
  1243. cr1 = cr2;
  1244. for(w = width / 2; w--;) {
  1245. *line1++ = *lum1++; *line2++ = *lum2++;
  1246. *line1++ = *line2++ = *cb1++;
  1247. *line1++ = *lum1++; *line2++ = *lum2++;
  1248. *line1++ = *line2++ = *cr1++;
  1249. }
  1250. linesrc += dst->linesize[0] * 2;
  1251. lumsrc += src->linesize[0] * 2;
  1252. cb2 += src->linesize[1];
  1253. cr2 += src->linesize[2];
  1254. }
  1255. }
  1256. static void yuv420p_to_uyvy422(AVPicture *dst, const AVPicture *src,
  1257. int width, int height)
  1258. {
  1259. int w, h;
  1260. uint8_t *line1, *line2, *linesrc = dst->data[0];
  1261. uint8_t *lum1, *lum2, *lumsrc = src->data[0];
  1262. uint8_t *cb1, *cb2 = src->data[1];
  1263. uint8_t *cr1, *cr2 = src->data[2];
  1264. for(h = height / 2; h--;) {
  1265. line1 = linesrc;
  1266. line2 = linesrc + dst->linesize[0];
  1267. lum1 = lumsrc;
  1268. lum2 = lumsrc + src->linesize[0];
  1269. cb1 = cb2;
  1270. cr1 = cr2;
  1271. for(w = width / 2; w--;) {
  1272. *line1++ = *line2++ = *cb1++;
  1273. *line1++ = *lum1++; *line2++ = *lum2++;
  1274. *line1++ = *line2++ = *cr1++;
  1275. *line1++ = *lum1++; *line2++ = *lum2++;
  1276. }
  1277. linesrc += dst->linesize[0] * 2;
  1278. lumsrc += src->linesize[0] * 2;
  1279. cb2 += src->linesize[1];
  1280. cr2 += src->linesize[2];
  1281. }
  1282. }
  1283. /* 2x2 -> 1x1 */
  1284. void ff_shrink22(uint8_t *dst, int dst_wrap,
  1285. const uint8_t *src, int src_wrap,
  1286. int width, int height)
  1287. {
  1288. int w;
  1289. const uint8_t *s1, *s2;
  1290. uint8_t *d;
  1291. for(;height > 0; height--) {
  1292. s1 = src;
  1293. s2 = s1 + src_wrap;
  1294. d = dst;
  1295. for(w = width;w >= 4; w-=4) {
  1296. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1297. d[1] = (s1[2] + s1[3] + s2[2] + s2[3] + 2) >> 2;
  1298. d[2] = (s1[4] + s1[5] + s2[4] + s2[5] + 2) >> 2;
  1299. d[3] = (s1[6] + s1[7] + s2[6] + s2[7] + 2) >> 2;
  1300. s1 += 8;
  1301. s2 += 8;
  1302. d += 4;
  1303. }
  1304. for(;w > 0; w--) {
  1305. d[0] = (s1[0] + s1[1] + s2[0] + s2[1] + 2) >> 2;
  1306. s1 += 2;
  1307. s2 += 2;
  1308. d++;
  1309. }
  1310. src += 2 * src_wrap;
  1311. dst += dst_wrap;
  1312. }
  1313. }
  1314. /* 4x4 -> 1x1 */
  1315. void ff_shrink44(uint8_t *dst, int dst_wrap,
  1316. const uint8_t *src, int src_wrap,
  1317. int width, int height)
  1318. {
  1319. int w;
  1320. const uint8_t *s1, *s2, *s3, *s4;
  1321. uint8_t *d;
  1322. for(;height > 0; height--) {
  1323. s1 = src;
  1324. s2 = s1 + src_wrap;
  1325. s3 = s2 + src_wrap;
  1326. s4 = s3 + src_wrap;
  1327. d = dst;
  1328. for(w = width;w > 0; w--) {
  1329. d[0] = (s1[0] + s1[1] + s1[2] + s1[3] +
  1330. s2[0] + s2[1] + s2[2] + s2[3] +
  1331. s3[0] + s3[1] + s3[2] + s3[3] +
  1332. s4[0] + s4[1] + s4[2] + s4[3] + 8) >> 4;
  1333. s1 += 4;
  1334. s2 += 4;
  1335. s3 += 4;
  1336. s4 += 4;
  1337. d++;
  1338. }
  1339. src += 4 * src_wrap;
  1340. dst += dst_wrap;
  1341. }
  1342. }
  1343. /* 8x8 -> 1x1 */
  1344. void ff_shrink88(uint8_t *dst, int dst_wrap,
  1345. const uint8_t *src, int src_wrap,
  1346. int width, int height)
  1347. {
  1348. int w, i;
  1349. for(;height > 0; height--) {
  1350. for(w = width;w > 0; w--) {
  1351. int tmp=0;
  1352. for(i=0; i<8; i++){
  1353. tmp += src[0] + src[1] + src[2] + src[3] + src[4] + src[5] + src[6] + src[7];
  1354. src += src_wrap;
  1355. }
  1356. *(dst++) = (tmp + 32)>>6;
  1357. src += 8 - 8*src_wrap;
  1358. }
  1359. src += 8*src_wrap - 8*width;
  1360. dst += dst_wrap - width;
  1361. }
  1362. }
  1363. /* XXX: add jpeg quantize code */
  1364. #define TRANSP_INDEX (6*6*6)
  1365. /* this is maybe slow, but allows for extensions */
  1366. static inline unsigned char gif_clut_index(uint8_t r, uint8_t g, uint8_t b)
  1367. {
  1368. return (((r) / 47) % 6) * 6 * 6 + (((g) / 47) % 6) * 6 + (((b) / 47) % 6);
  1369. }
  1370. static void build_rgb_palette(uint8_t *palette, int has_alpha)
  1371. {
  1372. uint32_t *pal;
  1373. static const uint8_t pal_value[6] = { 0x00, 0x33, 0x66, 0x99, 0xcc, 0xff };
  1374. int i, r, g, b;
  1375. pal = (uint32_t *)palette;
  1376. i = 0;
  1377. for(r = 0; r < 6; r++) {
  1378. for(g = 0; g < 6; g++) {
  1379. for(b = 0; b < 6; b++) {
  1380. pal[i++] = (0xff << 24) | (pal_value[r] << 16) |
  1381. (pal_value[g] << 8) | pal_value[b];
  1382. }
  1383. }
  1384. }
  1385. if (has_alpha)
  1386. pal[i++] = 0;
  1387. while (i < 256)
  1388. pal[i++] = 0xff000000;
  1389. }
  1390. /* copy bit n to bits 0 ... n - 1 */
  1391. static inline unsigned int bitcopy_n(unsigned int a, int n)
  1392. {
  1393. int mask;
  1394. mask = (1 << n) - 1;
  1395. return (a & (0xff & ~mask)) | ((-((a >> n) & 1)) & mask);
  1396. }
/* Each of the following sections defines the RGB_IN/RGB_OUT (and for
 * alpha formats RGBA_IN/RGBA_OUT) pixel accessor macros plus BPP
 * (bytes per pixel) for one packed RGB format, then includes
 * imgconvert_template.c, which expands into the conversion functions
 * for that format (names derived from RGB_NAME).
 * NOTE(review): the template presumably #undefs most of these macros
 * itself -- only the bgr24 section cleans up explicitly here; confirm
 * against imgconvert_template.c. */

/* rgb555 handling */
#define RGB_NAME rgb555

/* read one RGB555 pixel; bitcopy_n replicates the top bits so each
   5-bit channel expands to the full 8-bit range */
#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (10 - 3), 3);\
    g = bitcopy_n(v >> (5 - 3), 3);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);\
}

#define BPP 2

#include "imgconvert_template.c"

/* rgb565 handling */
#define RGB_NAME rgb565

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint16_t *)(s))[0];\
    r = bitcopy_n(v >> (11 - 3), 3);\
    g = bitcopy_n(v >> (5 - 2), 2);\
    b = bitcopy_n(v << 3, 3);\
}

#define RGB_OUT(d, r, g, b)\
{\
    ((uint16_t *)(d))[0] = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);\
}

#define BPP 2

#include "imgconvert_template.c"

/* bgr24 handling */
#define RGB_NAME bgr24

#define RGB_IN(r, g, b, s)\
{\
    b = (s)[0];\
    g = (s)[1];\
    r = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = b;\
    (d)[1] = g;\
    (d)[2] = r;\
}

#define BPP 3

#include "imgconvert_template.c"

#undef RGB_IN
#undef RGB_OUT
#undef BPP

/* rgb24 handling */
#define RGB_NAME rgb24
#define FMT_RGB24

#define RGB_IN(r, g, b, s)\
{\
    r = (s)[0];\
    g = (s)[1];\
    b = (s)[2];\
}

#define RGB_OUT(d, r, g, b)\
{\
    (d)[0] = r;\
    (d)[1] = g;\
    (d)[2] = b;\
}

#define BPP 3

#include "imgconvert_template.c"

/* rgb32 handling */
#define RGB_NAME rgb32
#define FMT_RGB32

#define RGB_IN(r, g, b, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define RGBA_OUT(d, r, g, b, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (r << 16) | (g << 8) | b;\
}

#define BPP 4

#include "imgconvert_template.c"
/* Expand 1 bit-per-pixel monochrome (MSB-first within each byte) to
 * 8-bit gray.  xor_mask inverts the source bits before expansion:
 * 0xff for white-is-zero input, 0x00 for black-is-zero.  Each bit b
 * becomes 0x00 or 0xff via the -(b) negation trick. */
static void mono_to_gray(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    const unsigned char *p;
    unsigned char *q;
    int v, dst_wrap, src_wrap;
    int y, w;

    p = src->data[0];
    /* one source byte covers 8 pixels */
    src_wrap = src->linesize[0] - ((width + 7) >> 3);
    q = dst->data[0];
    dst_wrap = dst->linesize[0] - width;
    for(y=0;y<height;y++) {
        w = width;
        while (w >= 8) {
            v = *p++ ^ xor_mask;
            q[0] = -(v >> 7);
            q[1] = -((v >> 6) & 1);
            q[2] = -((v >> 5) & 1);
            q[3] = -((v >> 4) & 1);
            q[4] = -((v >> 3) & 1);
            q[5] = -((v >> 2) & 1);
            q[6] = -((v >> 1) & 1);
            q[7] = -((v >> 0) & 1);
            w -= 8;
            q += 8;
        }
        if (w > 0) {
            /* partial final byte: consume its bits from the MSB down */
            v = *p++ ^ xor_mask;
            do {
                q[0] = -((v >> 7) & 1);
                q++;
                v <<= 1;
            } while (--w);
        }
        p += src_wrap;
        q += dst_wrap;
    }
}
/* monowhite (0 = white): invert the bits while expanding to gray */
static void monowhite_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0xff);
}
/* monoblack (1 = white): expand to gray without inversion */
static void monoblack_to_gray(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    mono_to_gray(dst, src, width, height, 0x00);
}
/* Pack 8-bit gray down to 1 bit per pixel (MSB-first within each
 * byte), keeping only the top bit of each sample; xor_mask inverts
 * the output bits (0xff produces white-is-zero, 0x00 black-is-zero). */
static void gray_to_mono(AVPicture *dst, const AVPicture *src,
                         int width, int height, int xor_mask)
{
    int n;
    const uint8_t *s;
    uint8_t *d;
    int j, b, v, n1, src_wrap, dst_wrap, y;

    s = src->data[0];
    src_wrap = src->linesize[0] - width;
    d = dst->data[0];
    /* one output byte covers 8 pixels */
    dst_wrap = dst->linesize[0] - ((width + 7) >> 3);
    for(y=0;y<height;y++) {
        n = width;
        while (n >= 8) {
            v = 0;
            for(j=0;j<8;j++) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
            }
            d[0] = v ^ xor_mask;
            d++;
            n -= 8;
        }
        if (n > 0) {
            /* partial final byte (1-7 pixels): shift the collected
               bits up so they occupy the MSB side of the byte */
            n1 = n;
            v = 0;
            while (n > 0) {
                b = s[0];
                s++;
                v = (v << 1) | (b >> 7);
                n--;
            }
            d[0] = (v << (8 - (n1 & 7))) ^ xor_mask;
            d++;
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* gray -> monowhite (0 = white): pack to 1bpp with inverted bits */
static void gray_to_monowhite(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0xff);
}
/* gray -> monoblack (1 = white): pack to 1bpp without inversion */
static void gray_to_monoblack(AVPicture *dst, const AVPicture *src,
                              int width, int height)
{
    gray_to_mono(dst, src, width, height, 0x00);
}
  1585. static void gray_to_gray16(AVPicture *dst, const AVPicture *src,
  1586. int width, int height)
  1587. {
  1588. int x, y, src_wrap, dst_wrap;
  1589. uint8_t *s, *d;
  1590. s = src->data[0];
  1591. src_wrap = src->linesize[0] - width;
  1592. d = dst->data[0];
  1593. dst_wrap = dst->linesize[0] - width * 2;
  1594. for(y=0; y<height; y++){
  1595. for(x=0; x<width; x++){
  1596. *d++ = *s;
  1597. *d++ = *s++;
  1598. }
  1599. s += src_wrap;
  1600. d += dst_wrap;
  1601. }
  1602. }
  1603. static void gray16_to_gray(AVPicture *dst, const AVPicture *src,
  1604. int width, int height)
  1605. {
  1606. int x, y, src_wrap, dst_wrap;
  1607. uint8_t *s, *d;
  1608. s = src->data[0];
  1609. src_wrap = src->linesize[0] - width * 2;
  1610. d = dst->data[0];
  1611. dst_wrap = dst->linesize[0] - width;
  1612. for(y=0; y<height; y++){
  1613. for(x=0; x<width; x++){
  1614. *d++ = *s;
  1615. s += 2;
  1616. }
  1617. s += src_wrap;
  1618. d += dst_wrap;
  1619. }
  1620. }
/* Big-endian 16-bit gray: the first byte of each sample is already
 * the MSB, so the generic routine can be used directly. */
static void gray16be_to_gray(AVPicture *dst, const AVPicture *src,
                             int width, int height)
{
    gray16_to_gray(dst, src, width, height);
}
/* Little-endian 16-bit gray: advance the source pointer by one byte
 * so gray16_to_gray reads the high-order (second) byte of each
 * little-endian sample. */
static void gray16le_to_gray(AVPicture *dst, const AVPicture *src,
                             int width, int height)
{
    AVPicture tmpsrc = *src;
    tmpsrc.data[0]++;
    gray16_to_gray(dst, &tmpsrc, width, height);
}
/* Copy a 16-bit gray image while byte-swapping every sample
 * (converts gray16le <-> gray16be). */
static void gray16_to_gray16(AVPicture *dst, const AVPicture *src,
                             int width, int height)
{
    int x, y, src_wrap, dst_wrap;
    uint16_t *s, *d;

    s = (uint16_t*)src->data[0];
    /* wraps are counted in uint16_t units, not bytes */
    src_wrap = (src->linesize[0] - width * 2)/2;
    d = (uint16_t*)dst->data[0];
    dst_wrap = (dst->linesize[0] - width * 2)/2;
    for(y=0; y<height; y++){
        for(x=0; x<width; x++){
            *d++ = bswap_16(*s++);
        }
        s += src_wrap;
        d += dst_wrap;
    }
}
/* One entry of the direct-conversion dispatch table: the function
 * converting a width x height picture between a fixed pair of
 * pixel formats. */
typedef struct ConvertEntry {
    void (*convert)(AVPicture *dst,
                    const AVPicture *src, int width, int height);
} ConvertEntry;
  1654. /* Add each new conversion function in this table. In order to be able
  1655. to convert from any format to any format, the following constraints
  1656. must be satisfied:
  1657. - all FF_COLOR_RGB formats must convert to and from PIX_FMT_RGB24
  1658. - all FF_COLOR_GRAY formats must convert to and from PIX_FMT_GRAY8
  1659. - all FF_COLOR_RGB formats with alpha must convert to and from PIX_FMT_RGB32
  1660. - PIX_FMT_YUV444P and PIX_FMT_YUVJ444P must convert to and from
  1661. PIX_FMT_RGB24.
   - PIX_FMT_YUYV422 must convert to and from PIX_FMT_YUV422P.
  1663. The other conversion functions are just optimizations for common cases.
  1664. */
  1665. static const ConvertEntry convert_table[PIX_FMT_NB][PIX_FMT_NB] = {
  1666. [PIX_FMT_YUV420P] = {
  1667. [PIX_FMT_YUYV422] = {
  1668. .convert = yuv420p_to_yuyv422,
  1669. },
  1670. [PIX_FMT_RGB555] = {
  1671. .convert = yuv420p_to_rgb555
  1672. },
  1673. [PIX_FMT_RGB565] = {
  1674. .convert = yuv420p_to_rgb565
  1675. },
  1676. [PIX_FMT_BGR24] = {
  1677. .convert = yuv420p_to_bgr24
  1678. },
  1679. [PIX_FMT_RGB24] = {
  1680. .convert = yuv420p_to_rgb24
  1681. },
  1682. [PIX_FMT_RGB32] = {
  1683. .convert = yuv420p_to_rgb32
  1684. },
  1685. [PIX_FMT_UYVY422] = {
  1686. .convert = yuv420p_to_uyvy422,
  1687. },
  1688. },
  1689. [PIX_FMT_YUV422P] = {
  1690. [PIX_FMT_YUYV422] = {
  1691. .convert = yuv422p_to_yuyv422,
  1692. },
  1693. [PIX_FMT_UYVY422] = {
  1694. .convert = yuv422p_to_uyvy422,
  1695. },
  1696. },
  1697. [PIX_FMT_YUV444P] = {
  1698. [PIX_FMT_RGB24] = {
  1699. .convert = yuv444p_to_rgb24
  1700. },
  1701. },
  1702. [PIX_FMT_YUVJ420P] = {
  1703. [PIX_FMT_RGB555] = {
  1704. .convert = yuvj420p_to_rgb555
  1705. },
  1706. [PIX_FMT_RGB565] = {
  1707. .convert = yuvj420p_to_rgb565
  1708. },
  1709. [PIX_FMT_BGR24] = {
  1710. .convert = yuvj420p_to_bgr24
  1711. },
  1712. [PIX_FMT_RGB24] = {
  1713. .convert = yuvj420p_to_rgb24
  1714. },
  1715. [PIX_FMT_RGB32] = {

Large files files are truncated, but you can click here to view the full file