/*
 * File: libavcodec/paf.c
 * Origin: https://gitlab.com/mba811/libav
 * License: GNU LGPL v2.1 or later (see the license header below; a scraper
 * previously mislabeled this file as CC-BY-SA-3.0).
 */
  1. /*
  2. * Packed Animation File video and audio decoder
  3. * Copyright (c) 2012 Paul B Mahol
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/imgutils.h"
  22. #include "libavutil/intreadwrite.h"
  23. #include "avcodec.h"
  24. #include "bytestream.h"
  25. #include "copy_block.h"
  26. #include "internal.h"
  27. #include "mathops.h"
/* Number of audio samples per channel in one PAF sound frame. */
#define PAF_SOUND_SAMPLES 2205
/* Bytes per sound frame: a 256-entry 16-bit codebook (256 * 2 bytes)
 * followed by one byte-sized codebook index per sample for each of the
 * two channels (PAF_SOUND_SAMPLES * 2 bytes). */
#define PAF_SOUND_FRAME_SIZE ((256 + PAF_SOUND_SAMPLES) * 2)
/* Expansion table for the per-block nibble opcodes used by decode_0().
 * Each row is a zero-terminated sequence of paint codes:
 *   2/3/4 fill a 4x2 half-block with a constant color under a bitmask,
 *   5/6/7 copy a 4x2 half-block from a reference page under a bitmask.
 * See the switch statement in decode_0() for the exact semantics. */
static const uint8_t block_sequences[16][8] = {
    { 0, 0, 0, 0, 0, 0, 0, 0 }, { 2, 0, 0, 0, 0, 0, 0, 0 },
    { 5, 7, 0, 0, 0, 0, 0, 0 }, { 5, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 0, 0, 0, 0, 0, 0, 0 }, { 5, 7, 5, 7, 0, 0, 0, 0 },
    { 5, 7, 5, 0, 0, 0, 0, 0 }, { 5, 7, 6, 0, 0, 0, 0, 0 },
    { 5, 5, 0, 0, 0, 0, 0, 0 }, { 3, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 6, 0, 0, 0, 0, 0, 0 }, { 2, 4, 0, 0, 0, 0, 0, 0 },
    { 2, 4, 5, 7, 0, 0, 0, 0 }, { 2, 4, 5, 0, 0, 0, 0, 0 },
    { 2, 4, 6, 0, 0, 0, 0, 0 }, { 2, 4, 5, 7, 5, 7, 0, 0 },
};
typedef struct PAFVideoDecContext {
    AVFrame *pic;       /* picture handed to the caller (PAL8) */
    GetByteContext gb;  /* bytestream reader over the current packet */

    int width;          /* coded width, multiple of 4 */
    int height;         /* coded height, multiple of 4 */

    int current_frame;  /* index (0..3) of the page currently being decoded */
    uint8_t *frame[4];  /* four pages used as prediction/reference sources */
    int frame_size;     /* allocated page size: width * FFALIGN(height, 256) */
    int video_size;     /* visible picture size: width * height */

    uint8_t *opcodes;   /* NOTE(review): not referenced anywhere in this
                         * file's visible code — decode_0() uses a local
                         * 'opcodes' pointer instead; possibly vestigial */
} PAFVideoDecContext;
  51. static av_cold int paf_video_close(AVCodecContext *avctx)
  52. {
  53. PAFVideoDecContext *c = avctx->priv_data;
  54. int i;
  55. av_frame_free(&c->pic);
  56. for (i = 0; i < 4; i++)
  57. av_freep(&c->frame[i]);
  58. return 0;
  59. }
  60. static av_cold int paf_video_init(AVCodecContext *avctx)
  61. {
  62. PAFVideoDecContext *c = avctx->priv_data;
  63. int i;
  64. c->width = avctx->width;
  65. c->height = avctx->height;
  66. if (avctx->height & 3 || avctx->width & 3) {
  67. av_log(avctx, AV_LOG_ERROR,
  68. "width %d and height %d must be multiplie of 4.\n",
  69. avctx->width, avctx->height);
  70. return AVERROR_INVALIDDATA;
  71. }
  72. avctx->pix_fmt = AV_PIX_FMT_PAL8;
  73. c->pic = av_frame_alloc();
  74. if (!c->pic)
  75. return AVERROR(ENOMEM);
  76. c->frame_size = avctx->width * FFALIGN(avctx->height, 256);
  77. c->video_size = avctx->width * avctx->height;
  78. for (i = 0; i < 4; i++) {
  79. c->frame[i] = av_mallocz(c->frame_size);
  80. if (!c->frame[i]) {
  81. paf_video_close(avctx);
  82. return AVERROR(ENOMEM);
  83. }
  84. }
  85. return 0;
  86. }
  87. static void read4x4block(PAFVideoDecContext *c, uint8_t *dst, int width)
  88. {
  89. int i;
  90. for (i = 0; i < 4; i++) {
  91. bytestream2_get_buffer(&c->gb, dst, 4);
  92. dst += width;
  93. }
  94. }
  95. static void copy_color_mask(uint8_t *dst, int width, uint8_t mask, uint8_t color)
  96. {
  97. int i;
  98. for (i = 0; i < 4; i++) {
  99. if (mask & (1 << 7 - i))
  100. dst[i] = color;
  101. if (mask & (1 << 3 - i))
  102. dst[width + i] = color;
  103. }
  104. }
  105. static void copy_src_mask(uint8_t *dst, int width, uint8_t mask, const uint8_t *src)
  106. {
  107. int i;
  108. for (i = 0; i < 4; i++) {
  109. if (mask & (1 << 7 - i))
  110. dst[i] = src[i];
  111. if (mask & (1 << 3 - i))
  112. dst[width + i] = src[width + i];
  113. }
  114. }
  115. static void set_src_position(PAFVideoDecContext *c,
  116. const uint8_t **p,
  117. const uint8_t **pend)
  118. {
  119. int val = bytestream2_get_be16(&c->gb);
  120. int page = val >> 14;
  121. int x = (val & 0x7F);
  122. int y = ((val >> 7) & 0x7F);
  123. *p = c->frame[page] + x * 2 + y * 2 * c->width;
  124. *pend = c->frame[page] + c->frame_size;
  125. }
  126. static int decode_0(PAFVideoDecContext *c, uint8_t *pkt, uint8_t code)
  127. {
  128. uint32_t opcode_size, offset;
  129. uint8_t *dst, *dend, mask = 0, color = 0;
  130. const uint8_t *src, *send, *opcodes;
  131. int i, j, op = 0;
  132. i = bytestream2_get_byte(&c->gb);
  133. if (i) {
  134. if (code & 0x10) {
  135. int pos = bytestream2_tell(&c->gb) & 3;
  136. if (pos)
  137. bytestream2_skip(&c->gb, 4 - pos);
  138. }
  139. do {
  140. int page, val, x, y;
  141. val = bytestream2_get_be16(&c->gb);
  142. page = val >> 14;
  143. x = (val & 0x7F) * 2;
  144. y = ((val >> 7) & 0x7F) * 2;
  145. dst = c->frame[page] + x + y * c->width;
  146. dend = c->frame[page] + c->frame_size;
  147. offset = (x & 0x7F) * 2;
  148. j = bytestream2_get_le16(&c->gb) + offset;
  149. do {
  150. offset++;
  151. if (dst + 3 * c->width + 4 > dend)
  152. return AVERROR_INVALIDDATA;
  153. read4x4block(c, dst, c->width);
  154. if ((offset & 0x3F) == 0)
  155. dst += c->width * 3;
  156. dst += 4;
  157. } while (offset < j);
  158. } while (--i);
  159. }
  160. dst = c->frame[c->current_frame];
  161. dend = c->frame[c->current_frame] + c->frame_size;
  162. do {
  163. set_src_position(c, &src, &send);
  164. if ((src + 3 * c->width + 4 > send) ||
  165. (dst + 3 * c->width + 4 > dend))
  166. return AVERROR_INVALIDDATA;
  167. copy_block4(dst, src, c->width, c->width, 4);
  168. i++;
  169. if ((i & 0x3F) == 0)
  170. dst += c->width * 3;
  171. dst += 4;
  172. } while (i < c->video_size / 16);
  173. opcode_size = bytestream2_get_le16(&c->gb);
  174. bytestream2_skip(&c->gb, 2);
  175. if (bytestream2_get_bytes_left(&c->gb) < opcode_size)
  176. return AVERROR_INVALIDDATA;
  177. opcodes = pkt + bytestream2_tell(&c->gb);
  178. bytestream2_skipu(&c->gb, opcode_size);
  179. dst = c->frame[c->current_frame];
  180. for (i = 0; i < c->height; i += 4, dst += c->width * 3)
  181. for (j = 0; j < c->width; j += 4, dst += 4) {
  182. int opcode, k = 0;
  183. if (op > opcode_size)
  184. return AVERROR_INVALIDDATA;
  185. if (j & 4) {
  186. opcode = opcodes[op] & 15;
  187. op++;
  188. } else {
  189. opcode = opcodes[op] >> 4;
  190. }
  191. while (block_sequences[opcode][k]) {
  192. offset = c->width * 2;
  193. code = block_sequences[opcode][k++];
  194. switch (code) {
  195. case 2:
  196. offset = 0;
  197. case 3:
  198. color = bytestream2_get_byte(&c->gb);
  199. case 4:
  200. mask = bytestream2_get_byte(&c->gb);
  201. copy_color_mask(dst + offset, c->width, mask, color);
  202. break;
  203. case 5:
  204. offset = 0;
  205. case 6:
  206. set_src_position(c, &src, &send);
  207. case 7:
  208. if (src + offset + c->width + 4 > send)
  209. return AVERROR_INVALIDDATA;
  210. mask = bytestream2_get_byte(&c->gb);
  211. copy_src_mask(dst + offset, c->width, mask, src + offset);
  212. break;
  213. }
  214. }
  215. }
  216. return 0;
  217. }
/**
 * Decode one PAF video packet into an AVFrame.
 *
 * Packet layout: a header code byte (bit 0x20 = keyframe, bit 0x40 =
 * palette update follows, low nibble = coding mode), optional palette
 * data, then mode-specific picture data. The decoded page is copied to
 * the output picture and the page index advances (mod 4) per frame.
 *
 * @return pkt->size on success, a negative AVERROR code otherwise.
 */
static int paf_video_decode(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *pkt)
{
    PAFVideoDecContext *c = avctx->priv_data;
    uint8_t code, *dst, *end;
    int i, frame, ret;

    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
        return ret;

    bytestream2_init(&c->gb, pkt->data, pkt->size);

    code = bytestream2_get_byte(&c->gb);
    if (code & 0x20) { // frame is keyframe
        /* Reset all reference pages and the palette. */
        for (i = 0; i < 4; i++)
            memset(c->frame[i], 0, c->frame_size);
        memset(c->pic->data[1], 0, AVPALETTE_SIZE);
        c->current_frame = 0;
        c->pic->key_frame = 1;
        c->pic->pict_type = AV_PICTURE_TYPE_I;
    } else {
        c->pic->key_frame = 0;
        c->pic->pict_type = AV_PICTURE_TYPE_P;
    }

    if (code & 0x40) { // palette update
        uint32_t *out = (uint32_t *)c->pic->data[1];
        int index, count;

        index = bytestream2_get_byte(&c->gb);
        count = bytestream2_get_byte(&c->gb) + 1;
        if (index + count > 256)
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&c->gb) < 3 * count)
            return AVERROR_INVALIDDATA;
        out += index;
        for (i = 0; i < count; i++) {
            unsigned r, g, b;

            /* 6-bit palette components, expanded to 8 bits by
             * replicating the top bits into the low bits. */
            r = bytestream2_get_byteu(&c->gb);
            r = r << 2 | r >> 4;
            g = bytestream2_get_byteu(&c->gb);
            g = g << 2 | g >> 4;
            b = bytestream2_get_byteu(&c->gb);
            b = b << 2 | b >> 4;
            /* Opaque ARGB entry. */
            *out++ = (0xFFU << 24) | (r << 16) | (g << 8) | b;
        }
        c->pic->palette_has_changed = 1;
    }

    switch (code & 0x0F) {
    case 0:
        /* Block-based motion compensation using 4x4 blocks with either
         * horizontal or vertical vectors; might incorporate VQ as well. */
        if ((ret = decode_0(c, pkt->data, code)) < 0)
            return ret;
        break;
    case 1:
        /* Uncompressed data. This mode specifies that (width * height) bytes
         * should be copied directly from the encoded buffer into the output. */
        dst = c->frame[c->current_frame];
        // possibly chunk length data
        bytestream2_skip(&c->gb, 2);
        if (bytestream2_get_bytes_left(&c->gb) < c->video_size)
            return AVERROR_INVALIDDATA;
        bytestream2_get_bufferu(&c->gb, dst, c->video_size);
        break;
    case 2:
        /* Copy reference frame: Consume the next byte in the stream as the
         * reference frame (which should be 0, 1, 2, or 3, and should not be
         * the same as the current frame number). */
        frame = bytestream2_get_byte(&c->gb);
        if (frame > 3)
            return AVERROR_INVALIDDATA;
        if (frame != c->current_frame)
            memcpy(c->frame[c->current_frame], c->frame[frame], c->frame_size);
        break;
    case 4:
        /* Run length encoding.*/
        dst = c->frame[c->current_frame];
        end = dst + c->video_size;

        bytestream2_skip(&c->gb, 2);

        while (dst < end) {
            /* NOTE: this inner 'code' deliberately shadows the outer
             * header byte; a negative value means "repeat one byte
             * -code+1 times", non-negative means "copy code+1 bytes". */
            int8_t code;
            int count;

            if (bytestream2_get_bytes_left(&c->gb) < 2)
                return AVERROR_INVALIDDATA;

            code  = bytestream2_get_byteu(&c->gb);
            count = FFABS(code) + 1;

            if (dst + count > end)
                return AVERROR_INVALIDDATA;
            if (code < 0)
                memset(dst, bytestream2_get_byteu(&c->gb), count);
            else
                bytestream2_get_buffer(&c->gb, dst, count);
            dst += count;
        }
        break;
    default:
        avpriv_request_sample(avctx, "unknown/invalid code");
        return AVERROR_INVALIDDATA;
    }

    /* Copy the visible part of the decoded page into the output picture. */
    av_image_copy_plane(c->pic->data[0], c->pic->linesize[0],
                        c->frame[c->current_frame], c->width,
                        c->width, c->height);

    c->current_frame = (c->current_frame + 1) & 3;
    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;

    *got_frame = 1;

    return pkt->size;
}
  322. static av_cold int paf_audio_init(AVCodecContext *avctx)
  323. {
  324. if (avctx->channels != 2) {
  325. av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
  326. return AVERROR_INVALIDDATA;
  327. }
  328. avctx->channel_layout = AV_CH_LAYOUT_STEREO;
  329. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  330. return 0;
  331. }
  332. static int paf_audio_decode(AVCodecContext *avctx, void *data,
  333. int *got_frame, AVPacket *pkt)
  334. {
  335. AVFrame *frame = data;
  336. int16_t *output_samples;
  337. const uint8_t *src = pkt->data;
  338. int frames, ret, i, j;
  339. int16_t cb[256];
  340. frames = pkt->size / PAF_SOUND_FRAME_SIZE;
  341. if (frames < 1)
  342. return AVERROR_INVALIDDATA;
  343. frame->nb_samples = PAF_SOUND_SAMPLES * frames;
  344. if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
  345. return ret;
  346. output_samples = (int16_t *)frame->data[0];
  347. // codebook of 256 16-bit samples and 8-bit indices to it
  348. for (j = 0; j < frames; j++) {
  349. for (i = 0; i < 256; i++)
  350. cb[i] = sign_extend(AV_RL16(src + i * 2), 16);
  351. src += 256 * 2;
  352. // always 2 channels
  353. for (i = 0; i < PAF_SOUND_SAMPLES * 2; i++)
  354. *output_samples++ = cb[*src++];
  355. }
  356. *got_frame = 1;
  357. return pkt->size;
  358. }
#if CONFIG_PAF_VIDEO_DECODER
/* Codec registration entry for the PAF video decoder. */
AVCodec ff_paf_video_decoder = {
    .name           = "paf_video",
    .long_name      = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PAF_VIDEO,
    .priv_data_size = sizeof(PAFVideoDecContext),
    .init           = paf_video_init,
    .close          = paf_video_close,
    .decode         = paf_video_decode,
    .capabilities   = CODEC_CAP_DR1,
};
#endif
#if CONFIG_PAF_AUDIO_DECODER
/* Codec registration entry for the PAF audio decoder (no private
 * context needed; all state lives in the packet being decoded). */
AVCodec ff_paf_audio_decoder = {
    .name         = "paf_audio",
    .long_name    = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Audio"),
    .type         = AVMEDIA_TYPE_AUDIO,
    .id           = AV_CODEC_ID_PAF_AUDIO,
    .init         = paf_audio_init,
    .decode       = paf_audio_decode,
    .capabilities = CODEC_CAP_DR1,
};
#endif