PageRenderTime 64ms CodeModel.GetById 33ms RepoModel.GetById 0ms app.codeStats 0ms

/external/webkit/Source/WebCore/webaudio/AudioBufferSourceNode.cpp

https://gitlab.com/brian0218/rk3066_r-box_android4.2.2_sdk
C++ | 455 lines | 286 code | 89 blank | 80 comment | 73 complexity | 18256390c6fb58ad4f30f6f63f0457c9 MD5 | raw file
  1. /*
  2. * Copyright (C) 2010, Google Inc. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. *
  13. * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
  14. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  16. * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
  17. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  21. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  22. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23. */
  24. #include "config.h"
  25. #if ENABLE(WEB_AUDIO)
  26. #include "AudioBufferSourceNode.h"
  27. #include "AudioContext.h"
  28. #include "AudioNodeOutput.h"
  29. #include <algorithm>
  30. #include <wtf/MathExtras.h>
  31. using namespace std;
  32. namespace WebCore {
  33. const double DefaultGrainDuration = 0.020; // 20ms
// Factory for AudioBufferSourceNode. Heap-allocates the node and transfers
// ownership to the caller via PassRefPtr (WebKit's adoptRef idiom).
PassRefPtr<AudioBufferSourceNode> AudioBufferSourceNode::create(AudioContext* context, double sampleRate)
{
    return adoptRef(new AudioBufferSourceNode(context, sampleRate));
}
// Constructs a source node in the stopped state with no buffer attached.
// Playback starts only after setBuffer() plus noteOn()/noteGrainOn().
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* context, double sampleRate)
    : AudioSourceNode(context, sampleRate)
    , m_buffer(0)
    , m_isPlaying(false)
    , m_isLooping(false)
    , m_hasFinished(false)
    , m_startTime(0.0)
    , m_schedulingFrameDelay(0)
    , m_readIndex(0)
    , m_isGrain(false)
    , m_grainOffset(0.0)
    , m_grainDuration(DefaultGrainDuration) // 20ms default grain window
    , m_grainFrameCount(0)
    , m_lastGain(1.0)
    , m_pannerNode(0)
{
    setType(NodeTypeAudioBufferSource);

    // gain is clamped to [0, 1]; playbackRate to [0, AudioResampler::MaxRate].
    m_gain = AudioGain::create("gain", 1.0, 0.0, 1.0);
    m_playbackRate = AudioParam::create("playbackRate", 1.0, 0.0, AudioResampler::MaxRate);

    // Default to mono. A call to setBuffer() will set the number of output channels to that of the buffer.
    addOutput(adoptPtr(new AudioNodeOutput(this, 1)));

    initialize();
}
AudioBufferSourceNode::~AudioBufferSourceNode()
{
    // Tear down AudioNode state (mirrors the initialize() in the constructor).
    uninitialize();
}
// Renders one quantum of framesToProcess sample-frames into this node's output bus.
// Runs on the audio thread and therefore must never block - hence the tryLock().
void AudioBufferSourceNode::process(size_t framesToProcess)
{
    AudioBus* outputBus = output(0)->bus();

    if (!isInitialized()) {
        outputBus->zero();
        return;
    }

    // The audio thread can't block on this lock, so we call tryLock() instead.
    // Careful - this is a tryLock() and not an autolocker, so we must unlock() before every return.
    if (m_processLock.tryLock()) {
        // Check if it's time to start playing.
        double sampleRate = this->sampleRate();
        double pitchRate = totalPitchRate();
        double quantumStartTime = context()->currentTime();
        double quantumEndTime = quantumStartTime + framesToProcess / sampleRate;

        // Output silence when stopped, already finished, bufferless, or scheduled
        // to start after this quantum ends.
        if (!m_isPlaying || m_hasFinished || !buffer() || m_startTime >= quantumEndTime) {
            // FIXME: can optimize here by propagating silent hint instead of forcing the whole chain to process silence.
            outputBus->zero();
            m_processLock.unlock();
            return;
        }

        // Handle sample-accurate scheduling so that buffer playback will happen at a very precise time.
        m_schedulingFrameDelay = 0;
        if (m_startTime >= quantumStartTime) {
            // m_schedulingFrameDelay is set here only the very first render quantum (because of above check: m_startTime >= quantumEndTime)
            // So: quantumStartTime <= m_startTime < quantumEndTime
            ASSERT(m_startTime < quantumEndTime);

            double startTimeInQuantum = m_startTime - quantumStartTime;
            double startFrameInQuantum = startTimeInQuantum * sampleRate;

            // m_schedulingFrameDelay is used in provideInput(), so factor in the current playback pitch rate.
            m_schedulingFrameDelay = static_cast<int>(pitchRate * startFrameInQuantum);
        }

        // FIXME: optimization opportunity:
        // With a bit of work, it should be possible to avoid going through the resampler completely when the pitchRate == 1,
        // especially if the pitchRate has never deviated from 1 in the past.

        // Read the samples through the pitch resampler. Our provideInput() method will be called by the resampler.
        m_resampler.setRate(pitchRate);
        m_resampler.process(this, outputBus, framesToProcess);

        // Apply the gain (in-place) to the output bus.
        // m_lastGain is updated by copyWithGainFrom() for de-zippering between quanta.
        double totalGain = gain()->value() * m_buffer->gain();
        outputBus->copyWithGainFrom(*outputBus, &m_lastGain, totalGain);

        m_processLock.unlock();
    } else {
        // Too bad - the tryLock() failed. We must be in the middle of changing buffers and were already outputting silence anyway.
        outputBus->zero();
    }
}
// The resampler calls us back here to get the input samples from our buffer.
// Fills 'bus' with 'numberOfFrames' frames read from m_buffer, handling the
// initial sample-accurate scheduling delay, loop wrap-around, and the
// end-of-buffer stop/silence case.
void AudioBufferSourceNode::provideInput(AudioBus* bus, size_t numberOfFrames)
{
    ASSERT(context()->isAudioThread());

    // Basic sanity checking
    ASSERT(bus);
    ASSERT(buffer());
    if (!bus || !buffer())
        return;

    unsigned numberOfChannels = this->numberOfChannels();
    unsigned busNumberOfChannels = bus->numberOfChannels();

    // FIXME: we can add support for sources with more than two channels, but this is not a common case.
    bool channelCountGood = numberOfChannels == busNumberOfChannels && (numberOfChannels == 1 || numberOfChannels == 2);
    ASSERT(channelCountGood);
    if (!channelCountGood)
        return;

    // Get the destination pointers.
    float* destinationL = bus->channel(0)->data();
    ASSERT(destinationL);
    if (!destinationL)
        return;
    float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->data();

    size_t bufferLength = buffer()->length();
    double bufferSampleRate = buffer()->sampleRate();

    // Calculate the start and end frames in our buffer that we want to play.
    // If m_isGrain is true, then we will be playing a portion of the total buffer.
    unsigned startFrame = m_isGrain ? static_cast<unsigned>(m_grainOffset * bufferSampleRate) : 0;
    unsigned endFrame = m_isGrain ? static_cast<unsigned>(startFrame + m_grainDuration * bufferSampleRate) : bufferLength;

    // This is a HACK to allow for HRTF tail-time - avoids glitch at end.
    // FIXME: implement tailTime for each AudioNode for a more general solution to this problem.
    if (m_isGrain)
        endFrame += 512;

    // Do some sanity checking.
    if (startFrame >= bufferLength)
        startFrame = !bufferLength ? 0 : bufferLength - 1;
    if (endFrame > bufferLength)
        endFrame = bufferLength;
    if (m_readIndex >= endFrame)
        m_readIndex = startFrame; // reset to start

    int framesToProcess = numberOfFrames;

    // Handle sample-accurate scheduling so that we play the buffer at a very precise time.
    // m_schedulingFrameDelay will only be non-zero the very first time that provideInput() is called, which corresponds
    // with the very start of the buffer playback.
    if (m_schedulingFrameDelay > 0) {
        ASSERT(m_schedulingFrameDelay <= framesToProcess);
        if (m_schedulingFrameDelay <= framesToProcess) {
            // Generate silence for the initial portion of the destination.
            memset(destinationL, 0, sizeof(float) * m_schedulingFrameDelay);
            destinationL += m_schedulingFrameDelay;
            if (destinationR) {
                memset(destinationR, 0, sizeof(float) * m_schedulingFrameDelay);
                destinationR += m_schedulingFrameDelay;
            }

            // Since we just generated silence for the initial portion, we have fewer frames to provide.
            framesToProcess -= m_schedulingFrameDelay;
        }
        // NOTE(review): if the delay ever exceeded framesToProcess it is silently
        // dropped here rather than carried over - this relies on process() always
        // computing a delay smaller than one render quantum; confirm upstream.
    }

    // We have to generate a certain number of output sample-frames, but we need to handle the case where we wrap around
    // from the end of the buffer to the start if playing back with looping and also the case where we simply reach the
    // end of the sample data, but haven't yet rendered numberOfFrames worth of output.
    while (framesToProcess > 0) {
        ASSERT(m_readIndex <= endFrame);
        if (m_readIndex > endFrame)
            return;

        // Figure out how many frames we can process this time.
        int framesAvailable = endFrame - m_readIndex;
        int framesThisTime = min(framesToProcess, framesAvailable);

        // Create the destination bus for the part of the destination we're processing this time.
        // The 'false' argument means the bus wraps external memory rather than allocating.
        AudioBus currentDestinationBus(busNumberOfChannels, framesThisTime, false);
        currentDestinationBus.setChannelMemory(0, destinationL, framesThisTime);
        if (busNumberOfChannels > 1)
            currentDestinationBus.setChannelMemory(1, destinationR, framesThisTime);

        // Generate output from the buffer (advances m_readIndex).
        readFromBuffer(&currentDestinationBus, framesThisTime);

        // Update the destination pointers.
        destinationL += framesThisTime;
        if (busNumberOfChannels > 1)
            destinationR += framesThisTime;

        framesToProcess -= framesThisTime;

        // Handle the case where we reach the end of the part of the sample data we're supposed to play for the buffer.
        if (m_readIndex >= endFrame) {
            m_readIndex = startFrame;
            m_grainFrameCount = 0;

            if (!looping()) {
                // If we're not looping, then stop playing when we get to the end.
                m_isPlaying = false;

                if (framesToProcess > 0) {
                    // We're not looping and we've reached the end of the sample data, but we still need to provide more output,
                    // so generate silence for the remaining.
                    memset(destinationL, 0, sizeof(float) * framesToProcess);
                    if (destinationR)
                        memset(destinationR, 0, sizeof(float) * framesToProcess);
                }

                if (!m_hasFinished) {
                    // Let the context dereference this AudioNode.
                    context()->notifyNodeFinishedProcessing(this);
                    m_hasFinished = true;
                }
                return;
            }
        }
    }
}
  215. void AudioBufferSourceNode::readFromBuffer(AudioBus* destinationBus, size_t framesToProcess)
  216. {
  217. bool isBusGood = destinationBus && destinationBus->length() == framesToProcess && destinationBus->numberOfChannels() == numberOfChannels();
  218. ASSERT(isBusGood);
  219. if (!isBusGood)
  220. return;
  221. unsigned numberOfChannels = this->numberOfChannels();
  222. // FIXME: we can add support for sources with more than two channels, but this is not a common case.
  223. bool channelCountGood = numberOfChannels == 1 || numberOfChannels == 2;
  224. ASSERT(channelCountGood);
  225. if (!channelCountGood)
  226. return;
  227. // Get pointers to the start of the sample buffer.
  228. float* sourceL = m_buffer->getChannelData(0)->data();
  229. float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0;
  230. // Sanity check buffer access.
  231. bool isSourceGood = sourceL && (numberOfChannels == 1 || sourceR) && m_readIndex + framesToProcess <= m_buffer->length();
  232. ASSERT(isSourceGood);
  233. if (!isSourceGood)
  234. return;
  235. // Offset the pointers to the current read position in the sample buffer.
  236. sourceL += m_readIndex;
  237. sourceR += m_readIndex;
  238. // Get pointers to the destination.
  239. float* destinationL = destinationBus->channel(0)->data();
  240. float* destinationR = numberOfChannels == 2 ? destinationBus->channel(1)->data() : 0;
  241. bool isDestinationGood = destinationL && (numberOfChannels == 1 || destinationR);
  242. ASSERT(isDestinationGood);
  243. if (!isDestinationGood)
  244. return;
  245. if (m_isGrain)
  246. readFromBufferWithGrainEnvelope(sourceL, sourceR, destinationL, destinationR, framesToProcess);
  247. else {
  248. // Simply copy the data from the source buffer to the destination.
  249. memcpy(destinationL, sourceL, sizeof(float) * framesToProcess);
  250. if (numberOfChannels == 2)
  251. memcpy(destinationR, sourceR, sizeof(float) * framesToProcess);
  252. }
  253. // Advance the buffer's read index.
  254. m_readIndex += framesToProcess;
  255. }
  256. void AudioBufferSourceNode::readFromBufferWithGrainEnvelope(float* sourceL, float* sourceR, float* destinationL, float* destinationR, size_t framesToProcess)
  257. {
  258. ASSERT(sourceL && destinationL);
  259. if (!sourceL || !destinationL)
  260. return;
  261. int grainFrameLength = static_cast<int>(m_grainDuration * m_buffer->sampleRate());
  262. bool isStereo = sourceR && destinationR;
  263. int n = framesToProcess;
  264. while (n--) {
  265. // Apply the grain envelope.
  266. float x = static_cast<float>(m_grainFrameCount) / static_cast<float>(grainFrameLength);
  267. m_grainFrameCount++;
  268. x = min(1.0f, x);
  269. float grainEnvelope = sinf(piFloat * x);
  270. *destinationL++ = grainEnvelope * *sourceL++;
  271. if (isStereo)
  272. *destinationR++ = grainEnvelope * *sourceR++;
  273. }
  274. }
  275. void AudioBufferSourceNode::reset()
  276. {
  277. m_resampler.reset();
  278. m_readIndex = 0;
  279. m_grainFrameCount = 0;
  280. m_lastGain = gain()->value();
  281. }
// Attaches (or detaches, when buffer is 0) the sample data this node plays.
// Main thread only; synchronizes against the audio thread's process().
void AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
{
    ASSERT(isMainThread());

    // The context must be locked since changing the buffer can re-configure the number of channels that are output.
    AudioContext::AutoLocker contextLocker(context());

    // This synchronizes with process().
    MutexLocker processLocker(m_processLock);

    if (buffer) {
        // Do any necessary re-configuration to the buffer's number of channels.
        unsigned numberOfChannels = buffer->numberOfChannels();
        m_resampler.configureChannels(numberOfChannels);
        output(0)->setNumberOfChannels(numberOfChannels);
    }

    // Start reading from the beginning of the (new) buffer.
    m_readIndex = 0;
    m_buffer = buffer;
}
  298. unsigned AudioBufferSourceNode::numberOfChannels()
  299. {
  300. return output(0)->numberOfChannels();
  301. }
  302. void AudioBufferSourceNode::noteOn(double when)
  303. {
  304. ASSERT(isMainThread());
  305. if (m_isPlaying)
  306. return;
  307. m_isGrain = false;
  308. m_startTime = when;
  309. m_readIndex = 0;
  310. m_isPlaying = true;
  311. }
  312. void AudioBufferSourceNode::noteGrainOn(double when, double grainOffset, double grainDuration)
  313. {
  314. ASSERT(isMainThread());
  315. if (m_isPlaying)
  316. return;
  317. if (!buffer())
  318. return;
  319. // Do sanity checking of grain parameters versus buffer size.
  320. double bufferDuration = buffer()->duration();
  321. if (grainDuration > bufferDuration)
  322. return; // FIXME: maybe should throw exception - consider in specification.
  323. double maxGrainOffset = bufferDuration - grainDuration;
  324. maxGrainOffset = max(0.0, maxGrainOffset);
  325. grainOffset = max(0.0, grainOffset);
  326. grainOffset = min(maxGrainOffset, grainOffset);
  327. m_grainOffset = grainOffset;
  328. m_grainDuration = grainDuration;
  329. m_grainFrameCount = 0;
  330. m_isGrain = true;
  331. m_startTime = when;
  332. m_readIndex = static_cast<int>(m_grainOffset * buffer()->sampleRate());
  333. m_isPlaying = true;
  334. }
  335. void AudioBufferSourceNode::noteOff(double)
  336. {
  337. ASSERT(isMainThread());
  338. if (!m_isPlaying)
  339. return;
  340. // FIXME: the "when" argument to this method is ignored.
  341. m_isPlaying = false;
  342. m_readIndex = 0;
  343. }
  344. double AudioBufferSourceNode::totalPitchRate()
  345. {
  346. double dopplerRate = 1.0;
  347. if (m_pannerNode.get())
  348. dopplerRate = m_pannerNode->dopplerRate();
  349. // Incorporate buffer's sample-rate versus AudioContext's sample-rate.
  350. // Normally it's not an issue because buffers are loaded at the AudioContext's sample-rate, but we can handle it in any case.
  351. double sampleRateFactor = 1.0;
  352. if (buffer())
  353. sampleRateFactor = buffer()->sampleRate() / sampleRate();
  354. double basePitchRate = playbackRate()->value();
  355. double totalRate = dopplerRate * sampleRateFactor * basePitchRate;
  356. // Sanity check the total rate. It's very important that the resampler not get any bad rate values.
  357. totalRate = max(0.0, totalRate);
  358. totalRate = min(AudioResampler::MaxRate, totalRate);
  359. bool isTotalRateValid = !isnan(totalRate) && !isinf(totalRate);
  360. ASSERT(isTotalRateValid);
  361. if (!isTotalRateValid)
  362. totalRate = 1.0;
  363. return totalRate;
  364. }
  365. } // namespace WebCore
  366. #endif // ENABLE(WEB_AUDIO)