PageRenderTime 41ms CodeModel.GetById 12ms RepoModel.GetById 0ms app.codeStats 0ms

/src/qt/qtwebkit/Source/WebCore/Modules/webaudio/AudioContext.cpp

https://gitlab.com/x33n/phantomjs
C++ | 1017 lines | 726 code | 192 blank | 99 comment | 89 complexity | f41d6599a3e699f1d1b85ff2cb2806a1 MD5 | raw file
  1. /*
  2. * Copyright (C) 2010, Google Inc. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. *
  13. * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
  14. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  16. * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
  17. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  21. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  22. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23. */
  24. #include "config.h"
  25. #if ENABLE(WEB_AUDIO)
  26. #include "AudioContext.h"
  27. #include "AnalyserNode.h"
  28. #include "AsyncAudioDecoder.h"
  29. #include "AudioBuffer.h"
  30. #include "AudioBufferCallback.h"
  31. #include "AudioBufferSourceNode.h"
  32. #include "AudioListener.h"
  33. #include "AudioNodeInput.h"
  34. #include "AudioNodeOutput.h"
  35. #include "BiquadFilterNode.h"
  36. #include "ChannelMergerNode.h"
  37. #include "ChannelSplitterNode.h"
  38. #include "ConvolverNode.h"
  39. #include "DefaultAudioDestinationNode.h"
  40. #include "DelayNode.h"
  41. #include "Document.h"
  42. #include "DynamicsCompressorNode.h"
  43. #include "ExceptionCode.h"
  44. #include "FFTFrame.h"
  45. #include "GainNode.h"
  46. #include "HRTFDatabaseLoader.h"
  47. #include "HRTFPanner.h"
  48. #include "OfflineAudioCompletionEvent.h"
  49. #include "OfflineAudioDestinationNode.h"
  50. #include "OscillatorNode.h"
  51. #include "Page.h"
  52. #include "PannerNode.h"
  53. #include "PeriodicWave.h"
  54. #include "ScriptCallStack.h"
  55. #include "ScriptController.h"
  56. #include "ScriptProcessorNode.h"
  57. #include "WaveShaperNode.h"
  58. #if ENABLE(MEDIA_STREAM)
  59. #include "MediaStream.h"
  60. #include "MediaStreamAudioDestinationNode.h"
  61. #include "MediaStreamAudioSourceNode.h"
  62. #endif
  63. #if ENABLE(VIDEO)
  64. #include "HTMLMediaElement.h"
  65. #include "MediaElementAudioSourceNode.h"
  66. #endif
  67. #if DEBUG_AUDIONODE_REFERENCES
  68. #include <stdio.h>
  69. #endif
  70. #if USE(GSTREAMER)
  71. #include "GStreamerUtilities.h"
  72. #endif
  73. #include <wtf/ArrayBuffer.h>
  74. #include <wtf/Atomics.h>
  75. #include <wtf/MainThread.h>
  76. #include <wtf/OwnPtr.h>
  77. #include <wtf/PassOwnPtr.h>
  78. #include <wtf/RefCounted.h>
  79. #include <wtf/text/WTFString.h>
// FIXME: check the proper way to reference an undefined thread ID
// NOTE(review): 0xffffffff does not fit in a signed int, so this relies on an
// implementation-defined conversion; it is compared against m_graphOwnerThread
// (a ThreadIdentifier) below — confirm the widths agree on all platforms.
const int UndefinedThreadIdentifier = 0xffffffff;
// Presumably a cap on node deletions per render quantum — not referenced in
// this chunk; verify against the rest of the file before relying on it.
const unsigned MaxNodesToDeletePerQuantum = 10;
// Maximum array length accepted by createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
  84. namespace WebCore {
  85. bool AudioContext::isSampleRateRangeGood(float sampleRate)
  86. {
  87. // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
  88. // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
  89. return sampleRate >= 44100 && sampleRate <= 96000;
  90. }
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Number of live realtime contexts; incremented in lazyInitialize() and
// decremented in uninitialize() for non-offline contexts.
unsigned AudioContext::s_hardwareContextCount = 0;
// Factory for a realtime (hardware-backed) AudioContext.
// Returns 0 when the hardware-context cap is reached; note that the exception
// code is accepted for API symmetry but never set on that path.
PassRefPtr<AudioContext> AudioContext::create(Document* document, ExceptionCode& ec)
{
    UNUSED_PARAM(ec);
    ASSERT(document);
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts)
        return 0;
    RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
    // ActiveDOMObject bookkeeping must run after construction completes.
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
// Constructor for rendering to the audio hardware.
// NOTE: the member-initializer list must stay in declaration order; do not
// reorder without checking AudioContext.h.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
    , m_restrictions(NoRestrictions)
{
    constructCommon();
    m_destinationNode = DefaultAudioDestinationNode::create(this);
    // This sets in motion an asynchronous loading mechanism on another thread.
    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
    // when this has finished (see AudioDestinationNode).
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
}
  129. // Constructor for offline (non-realtime) rendering.
  130. AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
  131. : ActiveDOMObject(document)
  132. , m_isStopScheduled(false)
  133. , m_isInitialized(false)
  134. , m_isAudioThreadFinished(false)
  135. , m_destinationNode(0)
  136. , m_automaticPullNodesNeedUpdating(false)
  137. , m_connectionCount(0)
  138. , m_audioThread(0)
  139. , m_graphOwnerThread(UndefinedThreadIdentifier)
  140. , m_isOfflineContext(true)
  141. , m_activeSourceCount(0)
  142. , m_restrictions(NoRestrictions)
  143. {
  144. constructCommon();
  145. // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
  146. m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
  147. // Create a new destination for offline rendering.
  148. m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
  149. m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
  150. }
// Shared initialization for both the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);
#if USE(GSTREAMER)
    initializeGStreamer();
#endif
    FFTFrame::initialize();
    m_listener = AudioListener::create();
#if PLATFORM(IOS)
    // iOS requires a user gesture before audio may start unless settings say otherwise.
    if (!document()->settings() || document()->settings()->mediaPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif
#if PLATFORM(MAC)
    // Mac gates audio start on page-level media consent (see startRendering()).
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // If an update was still pending, sync the rendering copy with the (asserted
    // empty) pull-node set so the following assert is meaningful.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
  187. void AudioContext::lazyInitialize()
  188. {
  189. if (!m_isInitialized) {
  190. // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
  191. ASSERT(!m_isAudioThreadFinished);
  192. if (!m_isAudioThreadFinished) {
  193. if (m_destinationNode.get()) {
  194. m_destinationNode->initialize();
  195. if (!isOfflineContext()) {
  196. // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
  197. // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
  198. // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
  199. // We may want to consider requiring it for symmetry with OfflineAudioContext.
  200. startRendering();
  201. ++s_hardwareContextCount;
  202. }
  203. }
  204. m_isInitialized = true;
  205. }
  206. }
  207. }
// Tears down the graph after the audio thread has stopped; called from
// stopDispatch() on the main thread.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode.clear();
    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Deleting nodes can mark further nodes for deletion (see markForDeletion()),
    // so keep draining until both lists are empty.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());
    // It was set in constructCommon.
    unsetPendingActivity(this);
}
// Stops audio rendering and releases playing sources. The inverse of
// lazyInitialize(); after this the context can never be re-initialized.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());
    if (!m_isInitialized)
        return;
    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();
    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;
    if (!isOfflineContext()) {
        // Realtime contexts count against the hardware-context cap.
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }
    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();
    m_isInitialized = false;
}
// True once lazyInitialize() has completed and uninitialize() has not run.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
  243. bool AudioContext::isRunnable() const
  244. {
  245. if (!isInitialized())
  246. return false;
  247. // Check with the HRTF spatialization system to see if it's finished loading.
  248. return m_hrtfDatabaseLoader->isLoaded();
  249. }
  250. void AudioContext::stopDispatch(void* userData)
  251. {
  252. AudioContext* context = reinterpret_cast<AudioContext*>(userData);
  253. ASSERT(context);
  254. if (!context)
  255. return;
  256. context->uninitialize();
  257. context->clear();
  258. }
// ActiveDOMObject override: called when the owning document is torn down.
void AudioContext::stop()
{
    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;
    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(stopDispatch, this);
}
// The owning document; the script execution context is asserted to be one.
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext && m_scriptExecutionContext->isDocument());
    return static_cast<Document*>(m_scriptExecutionContext);
}
  276. PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
  277. {
  278. RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
  279. if (!audioBuffer.get()) {
  280. ec = NOT_SUPPORTED_ERR;
  281. return 0;
  282. }
  283. return audioBuffer;
  284. }
  285. PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
  286. {
  287. ASSERT(arrayBuffer);
  288. if (!arrayBuffer) {
  289. ec = SYNTAX_ERR;
  290. return 0;
  291. }
  292. RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
  293. if (!audioBuffer.get()) {
  294. ec = SYNTAX_ERR;
  295. return 0;
  296. }
  297. return audioBuffer;
  298. }
// Asynchronously decodes audioData on the decoder thread; exactly one of the
// two callbacks fires later with the result. Null input raises SYNTAX_ERR.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
  307. PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
  308. {
  309. ASSERT(isMainThread());
  310. lazyInitialize();
  311. RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
  312. // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
  313. // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
  314. refNode(node.get());
  315. return node;
  316. }
#if ENABLE(VIDEO)
// Wraps an HTMLMediaElement as a source node. Fails with INVALID_STATE_ERR if
// the element is null or already feeds another source node.
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return 0;
    }
    ASSERT(isMainThread());
    lazyInitialize();
    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return 0;
    }
    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
    // Link the element back to its node so a second wrap attempt is rejected above.
    mediaElement->setAudioSourceNode(node.get());
    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
#endif
#if ENABLE(MEDIA_STREAM)
// Wraps a MediaStream as a source node. Only local streams with at least one
// audio track get a live input provider; others produce a null provider.
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return 0;
    }
    ASSERT(isMainThread());
    lazyInitialize();
    AudioSourceProvider* provider = 0;
    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    if (mediaStream->isLocal() && audioTracks.size()) {
        // Enable input for the specific local audio device specified in the MediaStreamSource.
        RefPtr<MediaStreamTrack> localAudio = audioTracks[0];
        MediaStreamSource* source = localAudio->component()->source();
        destination()->enableInput(source->deviceId());
        provider = destination()->localAudioInputProvider();
    } else {
        // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
        provider = 0;
    }
    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);
    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());
    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
// Creates a mono MediaStream destination node.
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}
#endif
  373. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
  374. {
  375. // Set number of input/output channels to stereo by default.
  376. return createScriptProcessor(bufferSize, 2, 2, ec);
  377. }
  378. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
  379. {
  380. // Set number of output channels to stereo by default.
  381. return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
  382. }
  383. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
  384. {
  385. ASSERT(isMainThread());
  386. lazyInitialize();
  387. RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
  388. if (!node.get()) {
  389. ec = INDEX_SIZE_ERR;
  390. return 0;
  391. }
  392. refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
  393. return node;
  394. }
  395. PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
  396. {
  397. ASSERT(isMainThread());
  398. lazyInitialize();
  399. return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
  400. }
  401. PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
  402. {
  403. ASSERT(isMainThread());
  404. lazyInitialize();
  405. return WaveShaperNode::create(this);
  406. }
  407. PassRefPtr<PannerNode> AudioContext::createPanner()
  408. {
  409. ASSERT(isMainThread());
  410. lazyInitialize();
  411. return PannerNode::create(this, m_destinationNode->sampleRate());
  412. }
  413. PassRefPtr<ConvolverNode> AudioContext::createConvolver()
  414. {
  415. ASSERT(isMainThread());
  416. lazyInitialize();
  417. return ConvolverNode::create(this, m_destinationNode->sampleRate());
  418. }
  419. PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
  420. {
  421. ASSERT(isMainThread());
  422. lazyInitialize();
  423. return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
  424. }
  425. PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
  426. {
  427. ASSERT(isMainThread());
  428. lazyInitialize();
  429. return AnalyserNode::create(this, m_destinationNode->sampleRate());
  430. }
  431. PassRefPtr<GainNode> AudioContext::createGain()
  432. {
  433. ASSERT(isMainThread());
  434. lazyInitialize();
  435. return GainNode::create(this, m_destinationNode->sampleRate());
  436. }
  437. PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
  438. {
  439. const double defaultMaxDelayTime = 1;
  440. return createDelay(defaultMaxDelayTime, ec);
  441. }
  442. PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
  443. {
  444. ASSERT(isMainThread());
  445. lazyInitialize();
  446. RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
  447. if (ec)
  448. return 0;
  449. return node;
  450. }
  451. PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
  452. {
  453. const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
  454. return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
  455. }
  456. PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
  457. {
  458. ASSERT(isMainThread());
  459. lazyInitialize();
  460. RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
  461. if (!node.get()) {
  462. ec = SYNTAX_ERR;
  463. return 0;
  464. }
  465. return node;
  466. }
  467. PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
  468. {
  469. const unsigned ChannelMergerDefaultNumberOfInputs = 6;
  470. return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
  471. }
  472. PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
  473. {
  474. ASSERT(isMainThread());
  475. lazyInitialize();
  476. RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
  477. if (!node.get()) {
  478. ec = SYNTAX_ERR;
  479. return 0;
  480. }
  481. return node;
  482. }
  483. PassRefPtr<OscillatorNode> AudioContext::createOscillator()
  484. {
  485. ASSERT(isMainThread());
  486. lazyInitialize();
  487. RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
  488. // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
  489. // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
  490. refNode(node.get());
  491. return node;
  492. }
  493. PassRefPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
  494. {
  495. ASSERT(isMainThread());
  496. if (!real || !imag || (real->length() != imag->length() || (real->length() > MaxPeriodicWaveLength) || (real->length() <= 0))) {
  497. ec = SYNTAX_ERR;
  498. return 0;
  499. }
  500. lazyInitialize();
  501. return PeriodicWave::create(sampleRate(), real, imag);
  502. }
// Called from the audio thread when a scheduled source finishes playing; the
// node is deref'd later on via derefFinishedSourceNodes().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
  508. void AudioContext::derefFinishedSourceNodes()
  509. {
  510. ASSERT(isGraphOwner());
  511. ASSERT(isAudioThread() || isAudioThreadFinished());
  512. for (unsigned i = 0; i < m_finishedNodes.size(); i++)
  513. derefNode(m_finishedNodes[i]);
  514. m_finishedNodes.clear();
  515. }
// Takes a connection reference on the node and records it so the context can
// release it later (derefNode / derefUnfinishedSourceNodes).
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
  523. void AudioContext::derefNode(AudioNode* node)
  524. {
  525. ASSERT(isGraphOwner());
  526. node->deref(AudioNode::RefTypeConnection);
  527. for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
  528. if (node == m_referencedNodes[i]) {
  529. m_referencedNodes.remove(i);
  530. break;
  531. }
  532. }
  533. }
  534. void AudioContext::derefUnfinishedSourceNodes()
  535. {
  536. ASSERT(isMainThread() && isAudioThreadFinished());
  537. for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
  538. m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
  539. m_referencedNodes.clear();
  540. }
// Acquires the graph mutex for the main thread. Re-entrant by hand: if this
// thread already owns the graph (m_graphOwnerThread), no lock is taken and
// mustReleaseLock tells the caller whether unlock() is theirs to call.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());
    ThreadIdentifier thisThread = currentThread();
    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
// Non-blocking lock for the real-time audio thread. Returns whether the graph
// lock is held on exit; mustReleaseLock tells the caller whether unlock() is
// theirs to call (false when the lock was already owned by this thread).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();
    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());
    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }
    bool hasLock;
    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();
        if (hasLock)
            m_graphOwnerThread = thisThread;
        mustReleaseLock = hasLock;
    }
    return hasLock;
}
// Releases the graph mutex; only the current graph owner may call this.
// Ownership must be cleared before unlocking so no thread sees a stale owner.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);
    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
// True when the calling thread is the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
// Queues a node whose finishDeref() must run later, under the graph lock
// (drained by handleDeferredFinishDerefs()).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
// Audio-thread hook run before a render quantum (see ASSERT below); syncs
// main-thread graph edits into the rendering state when the lock is free.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());
    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();
        if (mustReleaseLock)
            unlock();
    }
}
// Audio-thread hook run after a render quantum: finishes deferred derefs,
// releases finished sources, and hands node deletion off to the main thread.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());
    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();
        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();
        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();
        updateAutomaticPullNodes();
        if (mustReleaseLock)
            unlock();
    }
}
  638. void AudioContext::handleDeferredFinishDerefs()
  639. {
  640. ASSERT(isAudioThread() && isGraphOwner());
  641. for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
  642. AudioNode* node = m_deferredFinishDerefList[i];
  643. node->finishDeref(AudioNode::RefTypeConnection);
  644. }
  645. m_deferredFinishDerefList.clear();
  646. }
// Queues a node for deletion on the main thread. If the audio thread is gone
// it goes straight onto m_nodesToDelete; otherwise onto the marked list that
// scheduleNodeDeletion() later migrates.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());
    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);
    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
// Moves marked nodes to the delete list and schedules deleteMarkedNodes() on
// the main thread (at most one outstanding request, via m_isDeletionScheduled).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;
    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
        m_isDeletionScheduled = true;
        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
  677. void AudioContext::deleteMarkedNodesDispatch(void* userData)
  678. {
  679. AudioContext* context = reinterpret_cast<AudioContext*>(userData);
  680. ASSERT(context);
  681. if (!context)
  682. return;
  683. context->deleteMarkedNodes();
  684. context->deref();
  685. }
// Deletes everything on m_nodesToDelete under the graph lock, scrubbing each
// node's inputs/outputs from the dirty sets first so the handlers never touch
// freed memory.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());
    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);
        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();
            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));
            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));
            // Finally, delete it.
            delete node;
        }
        // Allow the next scheduleNodeDeletion() to post another request.
        m_isDeletionScheduled = false;
    }
}
// Flags a summing junction for a rendering-state refresh
// (processed by handleDirtyAudioSummingJunctions()).
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
// Withdraws a junction from the dirty set (e.g. before it goes away), taking
// the graph lock since this runs on the main thread.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
// Flags an output for a rendering-state refresh
// (processed by handleDirtyAudioNodeOutputs()).
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
  726. void AudioContext::handleDirtyAudioSummingJunctions()
  727. {
  728. ASSERT(isGraphOwner());
  729. for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
  730. (*i)->updateRenderingState();
  731. m_dirtySummingJunctions.clear();
  732. }
  733. void AudioContext::handleDirtyAudioNodeOutputs()
  734. {
  735. ASSERT(isGraphOwner());
  736. for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
  737. (*i)->updateRenderingState();
  738. m_dirtyAudioNodeOutputs.clear();
  739. }
  740. void AudioContext::addAutomaticPullNode(AudioNode* node)
  741. {
  742. ASSERT(isGraphOwner());
  743. if (!m_automaticPullNodes.contains(node)) {
  744. m_automaticPullNodes.add(node);
  745. m_automaticPullNodesNeedUpdating = true;
  746. }
  747. }
  748. void AudioContext::removeAutomaticPullNode(AudioNode* node)
  749. {
  750. ASSERT(isGraphOwner());
  751. if (m_automaticPullNodes.contains(node)) {
  752. m_automaticPullNodes.remove(node);
  753. m_automaticPullNodesNeedUpdating = true;
  754. }
  755. }
  756. void AudioContext::updateAutomaticPullNodes()
  757. {
  758. ASSERT(isGraphOwner());
  759. if (m_automaticPullNodesNeedUpdating) {
  760. // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
  761. m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
  762. unsigned j = 0;
  763. for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
  764. AudioNode* output = *i;
  765. m_renderingAutomaticPullNodes[j] = output;
  766. }
  767. m_automaticPullNodesNeedUpdating = false;
  768. }
  769. }
  770. void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
  771. {
  772. ASSERT(isAudioThread());
  773. for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
  774. m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
  775. }
// EventTarget override: DOM interface name for event dispatch.
const AtomicString& AudioContext::interfaceName() const
{
    return eventNames().interfaceForAudioContext;
}
// Returns 0 once stop() has been scheduled so no events fire during teardown.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
}
// Lifts behavior restrictions that can be satisfied right now (user gesture,
// page media consent) and tells the destination node to start rendering.
void AudioContext::startRendering()
{
    if (ScriptController::processingUserGesture())
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia())
            // Consent not available yet; mediaCanStart() will lift the restriction later.
            document()->addMediaCanStartListener(this);
        else
            removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }
    destination()->startRendering();
}
// MediaCanStartListener callback registered in startRendering(): the page has
// granted media consent, so drop the corresponding restriction.
void AudioContext::mediaCanStart()
{
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
}
// Delivers the offline-rendering result (m_renderTarget) to script via an
// OfflineAudioCompletionEvent. Main thread only; no-ops if the render target
// is missing or the document is already gone.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;
    AudioBuffer* renderedBuffer = m_renderTarget.get();
    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;
    // Avoid firing the event if the document has already gone away.
    if (scriptExecutionContext()) {
        // Call the offline rendering completion event listener.
        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
// Atomically bumps the count of currently-playing sources
// (may be called from either thread — hence the atomic).
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
// Atomically drops the count of currently-playing sources.
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
  824. } // namespace WebCore
  825. #endif // ENABLE(WEB_AUDIO)