
/external/ffmpeg/libavcodec/paf.c

https://gitlab.com/brian0218/rk3188_r-box_android4.2.2_sdk
/*
 * Packed Animation File video and audio decoder
 * Copyright (c) 2012 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/paf.h"
#include "bytestream.h"
#include "avcodec.h"
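
/* Each 4-bit block opcode expands to a short list of drawing steps
 * (codes 2-7, zero-terminated) that decode_0() executes for one 4x4 block. */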
static const uint8_t block_sequences[16][8] =
{
    { 0, 0, 0, 0, 0, 0, 0, 0 },
    { 2, 0, 0, 0, 0, 0, 0, 0 },
    { 5, 7, 0, 0, 0, 0, 0, 0 },
    { 5, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 0, 0, 0, 0, 0, 0, 0 },
    { 5, 7, 5, 7, 0, 0, 0, 0 },
    { 5, 7, 5, 0, 0, 0, 0, 0 },
    { 5, 7, 6, 0, 0, 0, 0, 0 },
    { 5, 5, 0, 0, 0, 0, 0, 0 },
    { 3, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 6, 0, 0, 0, 0, 0, 0 },
    { 2, 4, 0, 0, 0, 0, 0, 0 },
    { 2, 4, 5, 7, 0, 0, 0, 0 },
    { 2, 4, 5, 0, 0, 0, 0, 0 },
    { 2, 4, 6, 0, 0, 0, 0, 0 },
    { 2, 4, 5, 7, 5, 7, 0, 0 }
};

typedef struct PAFVideoDecContext {
    AVFrame pic;
    GetByteContext gb;
    int current_frame;
    uint8_t *frame[4];
    int frame_size;
    int video_size;
    uint8_t *opcodes;
} PAFVideoDecContext;
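
/* Validate dimensions (PAL8 output, width and height must be multiples of 4)
 * and allocate the four paging buffers, with height padded up to 256 rows. */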
static av_cold int paf_vid_init(AVCodecContext *avctx)
{
    PAFVideoDecContext *c = avctx->priv_data;
    int i;

    if (avctx->height & 3 || avctx->width & 3) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be multiples of 4\n");
        return AVERROR_INVALIDDATA;
    }

    avctx->pix_fmt = AV_PIX_FMT_PAL8;
    avcodec_get_frame_defaults(&c->pic);
    c->frame_size = FFALIGN(avctx->height, 256) * avctx->width;
    c->video_size = avctx->height * avctx->width;
    for (i = 0; i < 4; i++) {
        c->frame[i] = av_mallocz(c->frame_size);
        if (!c->frame[i])
            return AVERROR(ENOMEM);
    }
    return 0;
}
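
/* Decode an (a, b) byte pair into a byte offset within a frame page:
 * 7 bits of x and 7 bits of y, both in units of 2 pixels. */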
static int get_video_page_offset(AVCodecContext *avctx, uint8_t a, uint8_t b)
{
    int x, y;

    x = b & 0x7F;
    y = ((a & 0x3F) << 1) | (b >> 7 & 1);

    return y * 2 * avctx->width + x * 2;
}
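
/* Copy a 4x4 block of literal pixels from the bytestream into dst. */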
static void copy4h(AVCodecContext *avctx, uint8_t *dst)
{
    PAFVideoDecContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < 4; i++) {
        bytestream2_get_buffer(&c->gb, dst, 4);
        dst += avctx->width;
    }
}
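
/* Paint a solid color into two 4-pixel rows: the high nibble of mask selects
 * pixels in the first row, the low nibble pixels in the second row. */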
static void copy_color_mask(AVCodecContext *avctx, uint8_t mask, uint8_t *dst, uint8_t color)
{
    int i;

    for (i = 0; i < 4; i++) {
        if ((mask >> 4) & (1 << (3 - i)))
            dst[i] = color;
        if ((mask & 15) & (1 << (3 - i)))
            dst[avctx->width + i] = color;
    }
}
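
/* Same masking as copy_color_mask(), but pixels are copied from src
 * instead of being filled with a constant color. */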
static void copy_src_mask(AVCodecContext *avctx, uint8_t mask, uint8_t *dst, const uint8_t *src)
{
    int i;

    for (i = 0; i < 4; i++) {
        if ((mask >> 4) & (1 << (3 - i)))
            dst[i] = src[i];
        if ((mask & 15) & (1 << (3 - i)))
            dst[avctx->width + i] = src[avctx->width + i];
    }
}
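
/* Block-coded frame: first copy literal 4x4 blocks from the packet into the
 * paging buffers, then rebuild the current page from 4x4 blocks addressed in
 * other pages, and finally run a per-block opcode sequence of masked color
 * fills and masked inter-page copies. */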
static int decode_0(AVCodecContext *avctx, uint8_t code, uint8_t *pkt)
{
    PAFVideoDecContext *c = avctx->priv_data;
    uint32_t opcode_size, offset;
    uint8_t *dst, *dend, mask = 0, color = 0, a, b, p;
    const uint8_t *src, *send, *opcodes;
    int i, j, x = 0;

    i = bytestream2_get_byte(&c->gb);
    if (i) {
        if (code & 0x10) {
            int align;

            align = bytestream2_tell(&c->gb) & 3;
            if (align)
                bytestream2_skip(&c->gb, 4 - align);
        }
        do {
            a = bytestream2_get_byte(&c->gb);
            b = bytestream2_get_byte(&c->gb);
            p = (a & 0xC0) >> 6;
            dst = c->frame[p] + get_video_page_offset(avctx, a, b);
            dend = c->frame[p] + c->frame_size;
            offset = (b & 0x7F) * 2;
            j = bytestream2_get_le16(&c->gb) + offset;
            do {
                offset++;
                if (dst + 3 * avctx->width + 4 > dend)
                    return AVERROR_INVALIDDATA;
                copy4h(avctx, dst);
                if ((offset & 0x3F) == 0)
                    dst += avctx->width * 3;
                dst += 4;
            } while (offset < j);
        } while (--i);
    }

    dst = c->frame[c->current_frame];
    dend = c->frame[c->current_frame] + c->frame_size;
    do {
        a = bytestream2_get_byte(&c->gb);
        b = bytestream2_get_byte(&c->gb);
        p = (a & 0xC0) >> 6;
        src = c->frame[p] + get_video_page_offset(avctx, a, b);
        send = c->frame[p] + c->frame_size;
        if ((src + 3 * avctx->width + 4 > send) ||
            (dst + 3 * avctx->width + 4 > dend))
            return AVERROR_INVALIDDATA;
        copy_block4(dst, src, avctx->width, avctx->width, 4);
        i++;
        if ((i & 0x3F) == 0)
            dst += avctx->width * 3;
        dst += 4;
    } while (i < c->video_size / 16);

    opcode_size = bytestream2_get_le16(&c->gb);
    bytestream2_skip(&c->gb, 2);

    if (bytestream2_get_bytes_left(&c->gb) < opcode_size)
        return AVERROR_INVALIDDATA;

    opcodes = pkt + bytestream2_tell(&c->gb);
    bytestream2_skipu(&c->gb, opcode_size);

    dst = c->frame[c->current_frame];

    for (i = 0; i < avctx->height; i += 4, dst += avctx->width * 3) {
        for (j = 0; j < avctx->width; j += 4, dst += 4) {
            int opcode, k = 0;

            if (x >= opcode_size)
                return AVERROR_INVALIDDATA;
            if (j & 4) {
                opcode = opcodes[x] & 15;
                x++;
            } else {
                opcode = opcodes[x] >> 4;
            }

            while (block_sequences[opcode][k]) {
                offset = avctx->width * 2;
                code = block_sequences[opcode][k++];
                switch (code) {
                case 2:
                    offset = 0;
                    /* fall through */
                case 3:
                    color = bytestream2_get_byte(&c->gb);
                    /* fall through */
                case 4:
                    mask = bytestream2_get_byte(&c->gb);
                    copy_color_mask(avctx, mask, dst + offset, color);
                    break;
                case 5:
                    offset = 0;
                    /* fall through */
                case 6:
                    a = bytestream2_get_byte(&c->gb);
                    b = bytestream2_get_byte(&c->gb);
                    p = (a & 0xC0) >> 6;
                    src = c->frame[p] + get_video_page_offset(avctx, a, b);
                    send = c->frame[p] + c->frame_size;
                    /* fall through */
                case 7:
                    if (src + offset + avctx->width + 4 > send)
                        return AVERROR_INVALIDDATA;
                    mask = bytestream2_get_byte(&c->gb);
                    copy_src_mask(avctx, mask, dst + offset, src + offset);
                    break;
                }
            }
        }
    }

    return 0;
}
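
/* Top-level video decode: parse the flags byte (0x20 = keyframe, 0x40 =
 * palette update, low nibble = coding mode), decode into the current page,
 * then copy that page to the output picture and advance the page index. */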
static int paf_vid_decode(AVCodecContext *avctx, void *data,
                          int *data_size, AVPacket *pkt)
{
    PAFVideoDecContext *c = avctx->priv_data;
    uint8_t code, *dst, *src, *end;
    int i, frame, ret;

    c->pic.reference = 3;
    if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0)
        return ret;

    bytestream2_init(&c->gb, pkt->data, pkt->size);

    code = bytestream2_get_byte(&c->gb);
    if (code & 0x20) {
        for (i = 0; i < 4; i++)
            memset(c->frame[i], 0, c->frame_size);
        memset(c->pic.data[1], 0, AVPALETTE_SIZE);
        c->current_frame = 0;
        c->pic.key_frame = 1;
        c->pic.pict_type = AV_PICTURE_TYPE_I;
    } else {
        c->pic.key_frame = 0;
        c->pic.pict_type = AV_PICTURE_TYPE_P;
    }

    if (code & 0x40) {
        uint32_t *out = (uint32_t *)c->pic.data[1];
        int index, count;

        index = bytestream2_get_byte(&c->gb);
        count = bytestream2_get_byte(&c->gb) + 1;
        if (index + count > 256)
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&c->gb) < 3 * count)
            return AVERROR_INVALIDDATA;

        out += index;
        for (i = 0; i < count; i++) {
            unsigned r, g, b;

            r = bytestream2_get_byteu(&c->gb);
            r = r << 2 | r >> 4;
            g = bytestream2_get_byteu(&c->gb);
            g = g << 2 | g >> 4;
            b = bytestream2_get_byteu(&c->gb);
            b = b << 2 | b >> 4;
            *out++ = 0xFFU << 24 | r << 16 | g << 8 | b;
        }
        c->pic.palette_has_changed = 1;
    }

    switch (code & 0x0F) {
    case 0: /* block-coded frame */
        if ((ret = decode_0(avctx, code, pkt->data)) < 0)
            return ret;
        break;
    case 1: /* uncompressed frame */
        dst = c->frame[c->current_frame];
        bytestream2_skip(&c->gb, 2);
        if (bytestream2_get_bytes_left(&c->gb) < c->video_size)
            return AVERROR_INVALIDDATA;
        bytestream2_get_bufferu(&c->gb, dst, c->video_size);
        break;
    case 2: /* copy another page */
        frame = bytestream2_get_byte(&c->gb);
        if (frame > 3)
            return AVERROR_INVALIDDATA;
        if (frame != c->current_frame)
            memcpy(c->frame[c->current_frame], c->frame[frame], c->frame_size);
        break;
    case 4: /* RLE-compressed frame */
        dst = c->frame[c->current_frame];
        end = dst + c->video_size;

        bytestream2_skip(&c->gb, 2);

        while (dst < end) {
            int8_t code;
            int count;

            if (bytestream2_get_bytes_left(&c->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&c->gb);
            count = FFABS(code) + 1;

            if (dst + count > end)
                return AVERROR_INVALIDDATA;
            if (code < 0)
                memset(dst, bytestream2_get_byteu(&c->gb), count);
            else
                bytestream2_get_buffer(&c->gb, dst, count);
            dst += count;
        }
        break;
    default:
        av_log_ask_for_sample(avctx, "unknown/invalid code\n");
        return AVERROR_INVALIDDATA;
    }

    dst = c->pic.data[0];
    src = c->frame[c->current_frame];
    for (i = 0; i < avctx->height; i++) {
        memcpy(dst, src, avctx->width);
        dst += c->pic.linesize[0];
        src += avctx->width;
    }

    c->current_frame = (c->current_frame + 1) & 3;

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = c->pic;

    return pkt->size;
}
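
/* Release the output picture and free the four paging buffers. */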
static av_cold int paf_vid_close(AVCodecContext *avctx)
{
    PAFVideoDecContext *c = avctx->priv_data;
    int i;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    for (i = 0; i < 4; i++)
        av_freep(&c->frame[i]);

    return 0;
}

typedef struct PAFAudioDecContext {
    AVFrame frame;
} PAFAudioDecContext;
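
/* Audio init: PAF sound is always stereo and is decoded to signed 16-bit samples. */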
static av_cold int paf_aud_init(AVCodecContext *avctx)
{
    PAFAudioDecContext *c = avctx->priv_data;

    if (avctx->channels != 2) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
        return AVERROR_INVALIDDATA;
    }

    avcodec_get_frame_defaults(&c->frame);
    avctx->channel_layout = AV_CH_LAYOUT_STEREO;
    avctx->coded_frame = &c->frame;
    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    return 0;
}
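
/* Each sound frame starts with a 256-entry 16-bit little-endian table; the
 * bytes that follow index into that table, one byte per sample per channel. */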
static int paf_aud_decode(AVCodecContext *avctx, void *data,
                          int *got_frame_ptr, AVPacket *pkt)
{
    PAFAudioDecContext *c = avctx->priv_data;
    uint8_t *buf = pkt->data;
    int16_t *output_samples;
    const uint8_t *t;
    int frames, ret, i, j, k;

    frames = pkt->size / PAF_SOUND_FRAME_SIZE;
    if (frames < 1)
        return AVERROR_INVALIDDATA;

    c->frame.nb_samples = PAF_SOUND_SAMPLES * frames;
    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0)
        return ret;

    output_samples = (int16_t *)c->frame.data[0];
    for (i = 0; i < frames; i++) {
        t = buf + 256 * sizeof(uint16_t);
        for (j = 0; j < PAF_SOUND_SAMPLES; j++) {
            for (k = 0; k < 2; k++) {
                *output_samples++ = AV_RL16(buf + *t * 2);
                t++;
            }
        }
        buf += PAF_SOUND_FRAME_SIZE;
    }

    *got_frame_ptr = 1;
    *(AVFrame *)data = c->frame;

    return pkt->size;
}
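
/* Decoder registration entries for the PAF video and audio codecs. */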
AVCodec ff_paf_video_decoder = {
    .name           = "paf_video",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PAF_VIDEO,
    .priv_data_size = sizeof(PAFVideoDecContext),
    .init           = paf_vid_init,
    .close          = paf_vid_close,
    .decode         = paf_vid_decode,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Video"),
};

AVCodec ff_paf_audio_decoder = {
    .name           = "paf_audio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_PAF_AUDIO,
    .priv_data_size = sizeof(PAFAudioDecContext),
    .init           = paf_aud_init,
    .decode         = paf_aud_decode,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Audio"),
};