/ffmpeg/libavformat/rl2.c

https://github.com/tsangpozheng/iFrameExtractor
/*
 * RL2 Format Demuxer
 * Copyright (c) 2008 Sascha Sommer (saschasommer@freenet.de)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * RL2 file demuxer
 * @file libavformat/rl2.c
 * @author Sascha Sommer (saschasommer@freenet.de)
 * For more information regarding the RL2 file format, visit:
 *   http://wiki.multimedia.cx/index.php?title=RL2
 *
 * extradata:
 * 2 byte le initial drawing offset within 320x200 viewport
 * 4 byte le number of used colors
 * 256 * 3 bytes rgb palette
 * optional background_frame
 */

#include "libavutil/intreadwrite.h"
#include "avformat.h"

#define EXTRADATA1_SIZE (6 + 256 * 3)   ///< video base, clr, palette

#define FORM_TAG MKBETAG('F', 'O', 'R', 'M')
#define RLV2_TAG MKBETAG('R', 'L', 'V', '2')
#define RLV3_TAG MKBETAG('R', 'L', 'V', '3')
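
/**
 * Illustrative sketch (kept disabled): one way a consumer of this stream,
 * such as the RL2 video decoder, might read the extradata layout documented
 * in the header comment above. The function and variable names here are
 * purely illustrative; this demuxer only copies the bytes into extradata.
 */
#if 0
static void rl2_extradata_layout_example(const uint8_t *ext, int ext_size)
{
    unsigned video_base = AV_RL16(&ext[0]);      ///< initial drawing offset within the 320x200 viewport
    unsigned clr_count  = AV_RL32(&ext[2]);      ///< number of used colors
    const uint8_t *palette = &ext[6];            ///< 256 * 3 bytes rgb palette
    int back_size = ext_size - EXTRADATA1_SIZE;  ///< > 0 if the optional background frame is present
}
#endif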

typedef struct Rl2DemuxContext {
    unsigned int index_pos[2];   ///< indexes in the sample tables
} Rl2DemuxContext;


/**
 * check if the file is in rl2 format
 * @param p probe buffer
 * @return 0 when the probe buffer does not contain rl2 data, > 0 otherwise
 */
static int rl2_probe(AVProbeData *p)
{
    if(AV_RB32(&p->buf[0]) != FORM_TAG)
        return 0;

    if(AV_RB32(&p->buf[8]) != RLV2_TAG &&
        AV_RB32(&p->buf[8]) != RLV3_TAG)
        return 0;

    return AVPROBE_SCORE_MAX;
}

/**
 * read rl2 header data and setup the avstreams
 * @param s demuxer context
 * @param ap format parameters
 * @return 0 on success, AVERROR otherwise
 */
static av_cold int rl2_read_header(AVFormatContext *s,
                                   AVFormatParameters *ap)
{
    ByteIOContext *pb = s->pb;
    AVStream *st;
    unsigned int frame_count;
    unsigned int audio_frame_counter = 0;
    unsigned int video_frame_counter = 0;
    unsigned int back_size;
    int data_size;
    unsigned short encoding_method;
    unsigned short sound_rate;
    unsigned short rate;
    unsigned short channels;
    unsigned short def_sound_size;
    unsigned int signature;
    unsigned int pts_den = 11025; /* video only case */
    unsigned int pts_num = 1103;
    unsigned int* chunk_offset = NULL;
    int* chunk_size = NULL;
    int* audio_size = NULL;
    int i;
    int ret = 0;

    url_fskip(pb,4);          /* skip FORM tag */
    back_size = get_le32(pb); /** get size of the background frame */
    signature = get_be32(pb);
    data_size = get_be32(pb);
    frame_count = get_le32(pb);

    /* disallow back_sizes and frame_counts that may lead to overflows later */
    if(back_size > INT_MAX/2 || frame_count > INT_MAX / sizeof(uint32_t))
        return AVERROR_INVALIDDATA;

    encoding_method = get_le16(pb);
    sound_rate = get_le16(pb);
    rate = get_le16(pb);
    channels = get_le16(pb);
    def_sound_size = get_le16(pb);

    /** setup video stream */
    st = av_new_stream(s, 0);
    if(!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = CODEC_TYPE_VIDEO;
    st->codec->codec_id = CODEC_ID_RL2;
    st->codec->codec_tag = 0; /* no fourcc */
    st->codec->width = 320;
    st->codec->height = 200;

    /** allocate and fill extradata */
    st->codec->extradata_size = EXTRADATA1_SIZE;

    if(signature == RLV3_TAG && back_size > 0)
        st->codec->extradata_size += back_size;

    st->codec->extradata = av_mallocz(st->codec->extradata_size +
                                      FF_INPUT_BUFFER_PADDING_SIZE);
    if(!st->codec->extradata)
        return AVERROR(ENOMEM);

    if(get_buffer(pb,st->codec->extradata,st->codec->extradata_size) !=
        st->codec->extradata_size)
        return AVERROR(EIO);

    /** setup audio stream if present */
    if(sound_rate){
        pts_num = def_sound_size;
        pts_den = rate;

        st = av_new_stream(s, 0);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_PCM_U8;
        st->codec->codec_tag = 1;
        st->codec->channels = channels;
        st->codec->bits_per_coded_sample = 8;
        st->codec->sample_rate = rate;
        st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
            st->codec->bits_per_coded_sample;
        st->codec->block_align = st->codec->channels *
            st->codec->bits_per_coded_sample / 8;
        av_set_pts_info(st,32,1,rate);
    }
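
    /** video timebase: each video frame advances the pts by one tick of
        pts_num/pts_den seconds, i.e. def_sound_size/rate when audio is
        present, otherwise the default 1103/11025 (roughly 0.1 seconds) */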
    av_set_pts_info(s->streams[0], 32, pts_num, pts_den);

    chunk_size = av_malloc(frame_count * sizeof(uint32_t));
    audio_size = av_malloc(frame_count * sizeof(uint32_t));
    chunk_offset = av_malloc(frame_count * sizeof(uint32_t));

    if(!chunk_size || !audio_size || !chunk_offset){
        av_free(chunk_size);
        av_free(audio_size);
        av_free(chunk_offset);
        return AVERROR(ENOMEM);
    }

    /** read offset and size tables */
    for(i=0; i < frame_count;i++)
        chunk_size[i] = get_le32(pb);
    for(i=0; i < frame_count;i++)
        chunk_offset[i] = get_le32(pb);
    for(i=0; i < frame_count;i++)
        audio_size[i] = get_le32(pb) & 0xFFFF;

    /** build the sample index */
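    /** each chunk stores its audio data first, immediately followed by the
        video data, so the video entry starts at chunk_offset[i] + audio_size[i] */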
    for(i=0;i<frame_count;i++){
        if(chunk_size[i] < 0 || audio_size[i] > chunk_size[i]){
            ret = AVERROR_INVALIDDATA;
            break;
        }

        if(sound_rate && audio_size[i]){
            av_add_index_entry(s->streams[1], chunk_offset[i],
                audio_frame_counter,audio_size[i], 0, AVINDEX_KEYFRAME);
            audio_frame_counter += audio_size[i] / channels;
        }
        av_add_index_entry(s->streams[0], chunk_offset[i] + audio_size[i],
            video_frame_counter,chunk_size[i]-audio_size[i],0,AVINDEX_KEYFRAME);
        ++video_frame_counter;
    }

    av_free(chunk_size);
    av_free(audio_size);
    av_free(chunk_offset);

    return ret;
}

/**
 * read a single audio or video packet
 * @param s demuxer context
 * @param pkt the packet to be filled
 * @return 0 on success, AVERROR otherwise
 */
static int rl2_read_packet(AVFormatContext *s,
                           AVPacket *pkt)
{
    Rl2DemuxContext *rl2 = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVIndexEntry *sample = NULL;
    int i;
    int ret = 0;
    int stream_id = -1;
    int64_t pos = INT64_MAX;

    /** check if there is a valid video or audio entry that can be used */
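    /** across all streams, the pending index entry with the smallest file
        position wins, so packets are returned in storage order */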
    for(i=0; i<s->nb_streams; i++){
        if(rl2->index_pos[i] < s->streams[i]->nb_index_entries
            && s->streams[i]->index_entries[ rl2->index_pos[i] ].pos < pos){
            sample = &s->streams[i]->index_entries[ rl2->index_pos[i] ];
            pos = sample->pos;
            stream_id = i;
        }
    }

    if(stream_id == -1)
        return AVERROR(EIO);

    ++rl2->index_pos[stream_id];

    /** position the stream (will probably be there anyway) */
    url_fseek(pb, sample->pos, SEEK_SET);

    /** fill the packet */
    ret = av_get_packet(pb, pkt, sample->size);
    if(ret != sample->size){
        av_free_packet(pkt);
        return AVERROR(EIO);
    }

    pkt->stream_index = stream_id;
    pkt->pts = sample->timestamp;

    return ret;
}

/**
 * seek to a new timestamp
 * @param s demuxer context
 * @param stream_index index of the stream that should be seeked
 * @param timestamp wanted timestamp
 * @param flags direction and seeking mode
 * @return 0 on success, -1 otherwise
 */
static int rl2_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVStream *st = s->streams[stream_index];
    Rl2DemuxContext *rl2 = s->priv_data;
    int i;
    int index = av_index_search_timestamp(st, timestamp, flags);
    if(index < 0)
        return -1;

    rl2->index_pos[stream_index] = index;
    timestamp = st->index_entries[index].timestamp;
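
    /** reposition every stream to the closest index entry at or before the
        new timestamp so the streams stay in sync after the seek */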
    for(i=0; i < s->nb_streams; i++){
        AVStream *st2 = s->streams[i];
        index = av_index_search_timestamp(st2,
                    av_rescale_q(timestamp, st->time_base, st2->time_base),
                    flags | AVSEEK_FLAG_BACKWARD);
        if(index < 0)
            index = 0;

        rl2->index_pos[i] = index;
    }

    return 0;
}
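
/** demuxer registration: name, long name, private context size, then the
    probe, header, packet, close (unused) and seek callbacks */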
AVInputFormat rl2_demuxer = {
    "rl2",
    NULL_IF_CONFIG_SMALL("RL2 format"),
    sizeof(Rl2DemuxContext),
    rl2_probe,
    rl2_read_header,
    rl2_read_packet,
    NULL,
    rl2_read_seek,
};