PageRenderTime 28ms CodeModel.GetById 16ms RepoModel.GetById 0ms app.codeStats 1ms

/deps/ffmpeg/libavcodec/resample.c

http://github.com/lince/libffmpeg-c
C | 427 lines | 327 code | 58 blank | 42 comment | 79 complexity | bc5c70400a13764d2ba0b8b4910a0a9a MD5 | raw file
Possible License(s): LGPL-2.1, LGPL-3.0, CC-BY-SA-3.0, GPL-2.0, GPL-3.0
  1. /*
  2. * samplerate conversion for both audio and video
  3. * Copyright (c) 2000 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * samplerate conversion for both audio and video
  24. */
  25. #include "avcodec.h"
  26. #include "audioconvert.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/samplefmt.h"
  29. #define MAX_CHANNELS 8
  30. struct AVResampleContext;
  31. static const char *context_to_name(void *ptr)
  32. {
  33. return "audioresample";
  34. }
/* No AVOptions are exposed; the table still needs its NULL terminator. */
static const AVOption options[] = {{NULL}};

/* AVClass so av_log() can print a readable name for this context.
 * Fields are positional: class_name, item_name, option, version. */
static const AVClass audioresample_context_class = {
    "ReSampleContext", context_to_name, options, LIBAVUTIL_VERSION_INT
};
/**
 * State for combined channel mixing and sample-rate conversion.
 * Wraps the low-level AVResampleContext and adds S16 conversion buffers.
 */
struct ReSampleContext {
    struct AVResampleContext *resample_context; ///< low-level rate converter; also used as av_log() context
    short *temp[MAX_CHANNELS];  ///< per-channel input samples left unconsumed by the previous call
    int temp_len;               ///< number of samples currently held in each temp[] buffer
    float ratio;                ///< output_rate / input_rate
    /* channel convert */
    int input_channels, output_channels, filter_channels; ///< filter_channels = min(input, output)
    AVAudioConvert *convert_ctx[2]; ///< to-S16 (in) / from-S16 (out) converters; NULL when format is already S16
    enum AVSampleFormat sample_fmt[2]; ///< input and output sample format
    unsigned sample_size[2];    ///< size of one sample in sample_fmt
    short *buffer[2];           ///< buffers used for conversion to S16
    unsigned buffer_size[2];    ///< sizes of allocated buffers
};
  52. /* n1: number of samples */
  53. static void stereo_to_mono(short *output, short *input, int n1)
  54. {
  55. short *p, *q;
  56. int n = n1;
  57. p = input;
  58. q = output;
  59. while (n >= 4) {
  60. q[0] = (p[0] + p[1]) >> 1;
  61. q[1] = (p[2] + p[3]) >> 1;
  62. q[2] = (p[4] + p[5]) >> 1;
  63. q[3] = (p[6] + p[7]) >> 1;
  64. q += 4;
  65. p += 8;
  66. n -= 4;
  67. }
  68. while (n > 0) {
  69. q[0] = (p[0] + p[1]) >> 1;
  70. q++;
  71. p += 2;
  72. n--;
  73. }
  74. }
  75. /* n1: number of samples */
  76. static void mono_to_stereo(short *output, short *input, int n1)
  77. {
  78. short *p, *q;
  79. int n = n1;
  80. int v;
  81. p = input;
  82. q = output;
  83. while (n >= 4) {
  84. v = p[0]; q[0] = v; q[1] = v;
  85. v = p[1]; q[2] = v; q[3] = v;
  86. v = p[2]; q[4] = v; q[5] = v;
  87. v = p[3]; q[6] = v; q[7] = v;
  88. q += 8;
  89. p += 4;
  90. n -= 4;
  91. }
  92. while (n > 0) {
  93. v = p[0]; q[0] = v; q[1] = v;
  94. q += 2;
  95. p += 1;
  96. n--;
  97. }
  98. }
  99. /*
  100. 5.1 to stereo input: [fl, fr, c, lfe, rl, rr]
  101. - Left = front_left + rear_gain * rear_left + center_gain * center
  102. - Right = front_right + rear_gain * rear_right + center_gain * center
  103. Where rear_gain is usually around 0.5-1.0 and
  104. center_gain is almost always 0.7 (-3 dB)
  105. */
  106. static void surround_to_stereo(short **output, short *input, int channels, int samples)
  107. {
  108. int i;
  109. short l, r;
  110. for (i = 0; i < samples; i++) {
  111. int fl,fr,c,rl,rr,lfe;
  112. fl = input[0];
  113. fr = input[1];
  114. c = input[2];
  115. lfe = input[3];
  116. rl = input[4];
  117. rr = input[5];
  118. l = av_clip_int16(fl + (0.5 * rl) + (0.7 * c));
  119. r = av_clip_int16(fr + (0.5 * rr) + (0.7 * c));
  120. /* output l & r. */
  121. *output[0]++ = l;
  122. *output[1]++ = r;
  123. /* increment input. */
  124. input += channels;
  125. }
  126. }
  127. static void deinterleave(short **output, short *input, int channels, int samples)
  128. {
  129. int i, j;
  130. for (i = 0; i < samples; i++) {
  131. for (j = 0; j < channels; j++) {
  132. *output[j]++ = *input++;
  133. }
  134. }
  135. }
  136. static void interleave(short *output, short **input, int channels, int samples)
  137. {
  138. int i, j;
  139. for (i = 0; i < samples; i++) {
  140. for (j = 0; j < channels; j++) {
  141. *output++ = *input[j]++;
  142. }
  143. }
  144. }
  145. static void ac3_5p1_mux(short *output, short *input1, short *input2, int n)
  146. {
  147. int i;
  148. short l, r;
  149. for (i = 0; i < n; i++) {
  150. l = *input1++;
  151. r = *input2++;
  152. *output++ = l; /* left */
  153. *output++ = (l / 2) + (r / 2); /* center */
  154. *output++ = r; /* right */
  155. *output++ = 0; /* left surround */
  156. *output++ = 0; /* right surroud */
  157. *output++ = 0; /* low freq */
  158. }
  159. }
  160. #define SUPPORT_RESAMPLE(ch1, ch2, ch3, ch4, ch5, ch6, ch7, ch8) \
  161. ch8<<7 | ch7<<6 | ch6<<5 | ch5<<4 | ch4<<3 | ch3<<2 | ch2<<1 | ch1<<0
  162. static const uint8_t supported_resampling[MAX_CHANNELS] = {
  163. //ouput channels:1 2 3 4 5 6 7 8
  164. SUPPORT_RESAMPLE(1, 1, 0, 0, 0, 0, 0, 0), // 1 input channel
  165. SUPPORT_RESAMPLE(1, 1, 0, 0, 0, 1, 0, 0), // 2 input channels
  166. SUPPORT_RESAMPLE(0, 0, 1, 0, 0, 0, 0, 0), // 3 input channels
  167. SUPPORT_RESAMPLE(0, 0, 0, 1, 0, 0, 0, 0), // 4 input channels
  168. SUPPORT_RESAMPLE(0, 0, 0, 0, 1, 0, 0, 0), // 5 input channels
  169. SUPPORT_RESAMPLE(0, 1, 0, 0, 0, 1, 0, 0), // 6 input channels
  170. SUPPORT_RESAMPLE(0, 0, 0, 0, 0, 0, 1, 0), // 7 input channels
  171. SUPPORT_RESAMPLE(0, 0, 0, 0, 0, 0, 0, 1), // 8 input channels
  172. };
/**
 * Allocate and set up a ReSampleContext converting between the given channel
 * counts, sample rates and sample formats.
 *
 * Non-S16 input/output formats are handled by allocating AVAudioConvert
 * contexts that convert to/from S16 around the S16-only resampler core.
 * filter_length, log2_phase_count, linear and cutoff are low-level filter
 * parameters forwarded unchanged to av_resample_init().
 *
 * @return the new context, or NULL on an unsupported channel combination or
 *         allocation/conversion-setup failure
 */
ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
                                        int output_rate, int input_rate,
                                        enum AVSampleFormat sample_fmt_out,
                                        enum AVSampleFormat sample_fmt_in,
                                        int filter_length, int log2_phase_count,
                                        int linear, double cutoff)
{
    ReSampleContext *s;

    if (input_channels > MAX_CHANNELS) {
        av_log(NULL, AV_LOG_ERROR,
               "Resampling with input channels greater than %d is unsupported.\n",
               MAX_CHANNELS);
        return NULL;
    }
    /* Check the (input, output) channel pair against the bitmask table;
     * on failure, list the allowed output counts for this input count. */
    if (!(supported_resampling[input_channels-1] & (1<<(output_channels-1)))) {
        int i;
        av_log(NULL, AV_LOG_ERROR, "Unsupported audio resampling. Allowed "
               "output channels for %d input channel%s", input_channels,
               input_channels > 1 ? "s:" : ":");
        for (i = 0; i < MAX_CHANNELS; i++)
            if (supported_resampling[input_channels-1] & (1<<i))
                av_log(NULL, AV_LOG_ERROR, " %d", i + 1);
        av_log(NULL, AV_LOG_ERROR, "\n");
        return NULL;
    }

    s = av_mallocz(sizeof(ReSampleContext));
    if (!s) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate memory for resample context.\n");
        return NULL;
    }

    s->ratio = (float)output_rate / (float)input_rate;

    s->input_channels = input_channels;
    s->output_channels = output_channels;
    /* Only min(input, output) channels actually pass through the resampler;
     * the up/down-mix happens before or after it. */
    s->filter_channels = s->input_channels;
    if (s->output_channels < s->filter_channels)
        s->filter_channels = s->output_channels;
    s->sample_fmt[0]  = sample_fmt_in;
    s->sample_fmt[1]  = sample_fmt_out;
    s->sample_size[0] = av_get_bytes_per_sample(s->sample_fmt[0]);
    s->sample_size[1] = av_get_bytes_per_sample(s->sample_fmt[1]);

    /* Input-side converter: sample_fmt_in -> S16 (only when needed). */
    if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
        if (!(s->convert_ctx[0] = av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         s->sample_fmt[0], 1, NULL, 0))) {
            av_log(s, AV_LOG_ERROR,
                   "Cannot convert %s sample format to s16 sample format\n",
                   av_get_sample_fmt_name(s->sample_fmt[0]));
            av_free(s);
            return NULL;
        }
    }

    /* Output-side converter: S16 -> sample_fmt_out (only when needed). */
    if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
        if (!(s->convert_ctx[1] = av_audio_convert_alloc(s->sample_fmt[1], 1,
                                                         AV_SAMPLE_FMT_S16, 1, NULL, 0))) {
            av_log(s, AV_LOG_ERROR,
                   "Cannot convert s16 sample format to %s sample format\n",
                   av_get_sample_fmt_name(s->sample_fmt[1]));
            av_audio_convert_free(s->convert_ctx[0]);
            av_free(s);
            return NULL;
        }
    }

#define TAPS 16 /* NOTE(review): TAPS appears unused in this file -- confirm before removing */

    s->resample_context = av_resample_init(output_rate, input_rate,
                                           filter_length, log2_phase_count,
                                           linear, cutoff);

    /* Store the AVClass pointer at the start of the opaque resample context
     * so it can serve as an av_log() context; this assumes AVResampleContext
     * begins with a const AVClass * field -- TODO confirm against resample2.c.
     * NOTE(review): av_resample_init()'s result is not NULL-checked here. */
    *(const AVClass**)s->resample_context = &audioresample_context_class;

    return s;
}
  241. /* resample audio. 'nb_samples' is the number of input samples */
  242. /* XXX: optimize it ! */
  243. int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples)
  244. {
  245. int i, nb_samples1;
  246. short *bufin[MAX_CHANNELS];
  247. short *bufout[MAX_CHANNELS];
  248. short *buftmp2[MAX_CHANNELS], *buftmp3[MAX_CHANNELS];
  249. short *output_bak = NULL;
  250. int lenout;
  251. if (s->input_channels == s->output_channels && s->ratio == 1.0 && 0) {
  252. /* nothing to do */
  253. memcpy(output, input, nb_samples * s->input_channels * sizeof(short));
  254. return nb_samples;
  255. }
  256. if (s->sample_fmt[0] != AV_SAMPLE_FMT_S16) {
  257. int istride[1] = { s->sample_size[0] };
  258. int ostride[1] = { 2 };
  259. const void *ibuf[1] = { input };
  260. void *obuf[1];
  261. unsigned input_size = nb_samples * s->input_channels * 2;
  262. if (!s->buffer_size[0] || s->buffer_size[0] < input_size) {
  263. av_free(s->buffer[0]);
  264. s->buffer_size[0] = input_size;
  265. s->buffer[0] = av_malloc(s->buffer_size[0]);
  266. if (!s->buffer[0]) {
  267. av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
  268. return 0;
  269. }
  270. }
  271. obuf[0] = s->buffer[0];
  272. if (av_audio_convert(s->convert_ctx[0], obuf, ostride,
  273. ibuf, istride, nb_samples * s->input_channels) < 0) {
  274. av_log(s->resample_context, AV_LOG_ERROR,
  275. "Audio sample format conversion failed\n");
  276. return 0;
  277. }
  278. input = s->buffer[0];
  279. }
  280. lenout= 2*s->output_channels*nb_samples * s->ratio + 16;
  281. if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
  282. output_bak = output;
  283. if (!s->buffer_size[1] || s->buffer_size[1] < lenout) {
  284. av_free(s->buffer[1]);
  285. s->buffer_size[1] = lenout;
  286. s->buffer[1] = av_malloc(s->buffer_size[1]);
  287. if (!s->buffer[1]) {
  288. av_log(s->resample_context, AV_LOG_ERROR, "Could not allocate buffer\n");
  289. return 0;
  290. }
  291. }
  292. output = s->buffer[1];
  293. }
  294. /* XXX: move those malloc to resample init code */
  295. for (i = 0; i < s->filter_channels; i++) {
  296. bufin[i] = av_malloc((nb_samples + s->temp_len) * sizeof(short));
  297. memcpy(bufin[i], s->temp[i], s->temp_len * sizeof(short));
  298. buftmp2[i] = bufin[i] + s->temp_len;
  299. bufout[i] = av_malloc(lenout * sizeof(short));
  300. }
  301. if (s->input_channels == 2 && s->output_channels == 1) {
  302. buftmp3[0] = output;
  303. stereo_to_mono(buftmp2[0], input, nb_samples);
  304. } else if (s->output_channels >= 2 && s->input_channels == 1) {
  305. buftmp3[0] = bufout[0];
  306. memcpy(buftmp2[0], input, nb_samples * sizeof(short));
  307. } else if (s->input_channels == 6 && s->output_channels ==2) {
  308. buftmp3[0] = bufout[0];
  309. buftmp3[1] = bufout[1];
  310. surround_to_stereo(buftmp2, input, s->input_channels, nb_samples);
  311. } else if (s->output_channels >= s->input_channels && s->input_channels >= 2) {
  312. for (i = 0; i < s->input_channels; i++) {
  313. buftmp3[i] = bufout[i];
  314. }
  315. deinterleave(buftmp2, input, s->input_channels, nb_samples);
  316. } else {
  317. buftmp3[0] = output;
  318. memcpy(buftmp2[0], input, nb_samples * sizeof(short));
  319. }
  320. nb_samples += s->temp_len;
  321. /* resample each channel */
  322. nb_samples1 = 0; /* avoid warning */
  323. for (i = 0; i < s->filter_channels; i++) {
  324. int consumed;
  325. int is_last = i + 1 == s->filter_channels;
  326. nb_samples1 = av_resample(s->resample_context, buftmp3[i], bufin[i],
  327. &consumed, nb_samples, lenout, is_last);
  328. s->temp_len = nb_samples - consumed;
  329. s->temp[i] = av_realloc(s->temp[i], s->temp_len * sizeof(short));
  330. memcpy(s->temp[i], bufin[i] + consumed, s->temp_len * sizeof(short));
  331. }
  332. if (s->output_channels == 2 && s->input_channels == 1) {
  333. mono_to_stereo(output, buftmp3[0], nb_samples1);
  334. } else if (s->output_channels == 6 && s->input_channels == 2) {
  335. ac3_5p1_mux(output, buftmp3[0], buftmp3[1], nb_samples1);
  336. } else if ((s->output_channels == s->input_channels && s->input_channels >= 2) ||
  337. (s->output_channels == 2 && s->input_channels == 6)) {
  338. interleave(output, buftmp3, s->output_channels, nb_samples1);
  339. }
  340. if (s->sample_fmt[1] != AV_SAMPLE_FMT_S16) {
  341. int istride[1] = { 2 };
  342. int ostride[1] = { s->sample_size[1] };
  343. const void *ibuf[1] = { output };
  344. void *obuf[1] = { output_bak };
  345. if (av_audio_convert(s->convert_ctx[1], obuf, ostride,
  346. ibuf, istride, nb_samples1 * s->output_channels) < 0) {
  347. av_log(s->resample_context, AV_LOG_ERROR,
  348. "Audio sample format convertion failed\n");
  349. return 0;
  350. }
  351. }
  352. for (i = 0; i < s->filter_channels; i++) {
  353. av_free(bufin[i]);
  354. av_free(bufout[i]);
  355. }
  356. return nb_samples1;
  357. }
  358. void audio_resample_close(ReSampleContext *s)
  359. {
  360. int i;
  361. av_resample_close(s->resample_context);
  362. for (i = 0; i < s->filter_channels; i++)
  363. av_freep(&s->temp[i]);
  364. av_freep(&s->buffer[0]);
  365. av_freep(&s->buffer[1]);
  366. av_audio_convert_free(s->convert_ctx[0]);
  367. av_audio_convert_free(s->convert_ctx[1]);
  368. av_free(s);
  369. }