
/webview/native/Source/WebCore/platform/audio/HRTFElevation.cpp

https://bitbucket.org/shemnon/openjfx-8-master-rt
C++ | 342 lines | 213 code | 63 blank | 66 comment | 42 complexity | d879c072b99fdf81326969a8a50b814e MD5
Possible License(s): GPL-2.0, LGPL-2.0, BSD-3-Clause, LGPL-2.1
/*
 * Copyright (C) 2010 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "HRTFElevation.h"

#include "AudioBus.h"
#include "AudioFileReader.h"
#include "Biquad.h"
#include "FFTFrame.h"
#include "HRTFPanner.h"
#include <algorithm>
#include <math.h>
#include <wtf/OwnPtr.h>

using namespace std;

namespace WebCore {
const unsigned HRTFElevation::AzimuthSpacing = 15;
const unsigned HRTFElevation::NumberOfRawAzimuths = 360 / AzimuthSpacing;
const unsigned HRTFElevation::InterpolationFactor = 8;
const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * InterpolationFactor;

// Total number of components of an HRTF database.
const size_t TotalNumberOfResponses = 240;

// Number of frames in an individual impulse response.
const size_t ResponseFrameSize = 256;

// Sample-rate of the spatialization impulse responses as stored in the resource file.
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
const float ResponseSampleRate = 44100;
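
// For reference: the concatenated resource therefore holds
// TotalNumberOfResponses * ResponseFrameSize == 240 * 256 == 61440 frames
// per channel at 44.1 kHz, which is exactly the length checked in
// getConcatenatedImpulseResponsesForSubject() below.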

#if PLATFORM(GTK) || PLATFORM(MAC) || PLATFORM(EFL)
#define USE_CONCATENATED_IMPULSE_RESPONSES
#endif

#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
// Lazily load a concatenated HRTF database for a given subject and store it in a
// local hash table to ensure quick, efficient future retrievals.
static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName)
{
    typedef HashMap<String, AudioBus*> AudioBusMap;
    DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());

    AudioBus* bus;
    AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
    if (iterator == audioBusMap.end()) {
        OwnPtr<AudioBus> concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
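        // leakPtr() deliberately releases ownership from the OwnPtr: the raw
        // pointer is cached in the static map and lives for the rest of the process.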
        bus = concatenatedImpulseResponses.leakPtr();
        audioBusMap.set(subjectName, bus);
    } else
        bus = iterator->second;

    size_t responseLength = bus->length();
    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);

    // Check number of channels and length. For now these are fixed and known.
    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
    ASSERT(isBusGood);
    if (!isBusGood)
        return 0;

    return bus;
}
#endif

// Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees
// where the roles of left and right ears are reversed with respect to each other.
bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                                 RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
    RefPtr<HRTFKernel> kernelL1;
    RefPtr<HRTFKernel> kernelR1;
    bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1);
    if (!success)
        return false;

    // And symmetric version.
    int symmetricAzimuth = !azimuth ? 0 : 360 - azimuth;
    RefPtr<HRTFKernel> kernelL2;
    RefPtr<HRTFKernel> kernelR2;
    success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2);
    if (!success)
        return false;

    // Notice L/R reversal in symmetric version.
    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
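
    // For example, for azimuth == 30 the symmetric azimuth is 360 - 30 == 330:
    // the composite left kernel blends left(30) with right(330), and the
    // composite right kernel blends right(30) with left(330), each with
    // equal 0.5 weight, since the two measurements mirror the ears.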

    return true;
}

bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                        RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
    // Valid values for azimuth are 0 -> 345 in 15-degree increments.
    // Valid values for elevation are -45 -> +90 in 15-degree increments.
    bool isAzimuthGood = azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
    ASSERT(isAzimuthGood);
    if (!isAzimuthGood)
        return false;

    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
    ASSERT(isElevationGood);
    if (!isElevationGood)
        return false;

    // Construct the resource name from the subject name, azimuth, and elevation, for example:
    // "IRC_Composite_C_R0195_T015_P000"
    // Note: the passed-in subjectName is not a string passed in via JavaScript or the web.
    // It's passed in as an internal ASCII identifier and is an implementation detail.
    int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
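    // For example, elevation -45 maps to positiveElevation 315, so with
    // azimuth 30 the non-concatenated resource name below ends in "_T030_P315".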

#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
    AudioBus* bus(getConcatenatedImpulseResponsesForSubject(subjectName));
    if (!bus)
        return false;

    int elevationIndex = positiveElevation / AzimuthSpacing;
    if (positiveElevation > 90)
        elevationIndex -= AzimuthSpacing;

    // The concatenated impulse response is a bus containing all
    // the elevations per azimuth, for all azimuths by increasing
    // order. So for a given azimuth and elevation we need to compute
    // the index of the wanted audio frames in the concatenated table.
    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
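    // Worked example: with NumberOfRawElevations == TotalNumberOfResponses /
    // NumberOfRawAzimuths == 240 / 24 == 10, azimuth 90 and elevation 30 give
    // index = (90 / 15) * 10 + (30 / 15) = 60 + 2 = 62.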
    bool isIndexGood = index < TotalNumberOfResponses;
    ASSERT(isIndexGood);
    if (!isIndexGood)
        return false;

    // Extract the individual impulse response from the concatenated
    // responses and potentially sample-rate convert it to the desired
    // (hardware) sample-rate.
    unsigned startFrame = index * ResponseFrameSize;
    unsigned stopFrame = startFrame + ResponseFrameSize;
    OwnPtr<AudioBus> preSampleRateConvertedResponse = AudioBus::createBufferFromRange(bus, startFrame, stopFrame);
    OwnPtr<AudioBus> response = AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate);
    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
#else
    String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);

    OwnPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
    ASSERT(impulseResponse.get());
    if (!impulseResponse.get())
        return false;

    size_t responseLength = impulseResponse->length();
    size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0));
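    // For example, at a 48 kHz hardware rate the resampled response is expected
    // to be static_cast<size_t>(256 * (48000.0 / 44100.0)) == 278 frames.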

    // Check number of channels and length. For now these are fixed and known.
    bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2;
    ASSERT(isBusGood);
    if (!isBusGood)
        return false;

    AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
    AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
#endif

    // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
    const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
    kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate, true);
    kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate, true);

    return true;
}

// The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45.
//
// The table below gives the maximum available elevation for each azimuth:
static int maxElevations[] = {
        //  Azimuth
        //
    90, // 0
    45, // 15
    60, // 30
    45, // 45
    75, // 60
    45, // 75
    60, // 90
    45, // 105
    75, // 120
    45, // 135
    60, // 150
    45, // 165
    75, // 180
    45, // 195
    60, // 210
    45, // 225
    75, // 240
    45, // 255
    60, // 270
    45, // 285
    75, // 300
    45, // 315
    60, // 330
    45  // 345
};

PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
{
    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
    ASSERT(isElevationGood);
    if (!isElevationGood)
        return nullptr;

    OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
    OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));

    // Load convolution kernels from HRTF files.
    int interpolatedIndex = 0;
    for (unsigned rawIndex = 0; rawIndex < NumberOfRawAzimuths; ++rawIndex) {
        // Don't let elevation exceed maximum for this azimuth.
        int maxElevation = maxElevations[rawIndex];
        int actualElevation = min(elevation, maxElevation);

        bool success = calculateKernelsForAzimuthElevation(rawIndex * AzimuthSpacing, actualElevation, sampleRate, subjectName, kernelListL->at(interpolatedIndex), kernelListR->at(interpolatedIndex));
        if (!success)
            return nullptr;

        interpolatedIndex += InterpolationFactor;
    }

    // Now go back and interpolate intermediate azimuth values.
    for (unsigned i = 0; i < NumberOfTotalAzimuths; i += InterpolationFactor) {
        int j = (i + InterpolationFactor) % NumberOfTotalAzimuths;

        // Create the interpolated convolution kernels and delays.
        for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
            float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1
            (*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
            (*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
        }
    }
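
    // For example, with InterpolationFactor == 8 the raw kernels occupy indices
    // 0, 8, 16, ..., 184; the pass above fills index 3 (i == 0, jj == 3) by
    // blending kernels 0 and 8 with weight x == 3 / 8 == 0.375, and the modulo
    // on j wraps the last segment (i == 184) back around to kernel 0.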

    OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), elevation, sampleRate));
    return hrtfElevation.release();
}

PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
{
    ASSERT(hrtfElevation1 && hrtfElevation2);
    if (!hrtfElevation1 || !hrtfElevation2)
        return nullptr;

    ASSERT(x >= 0.0 && x < 1.0);

    OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
    OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));

    HRTFKernelList* kernelListL1 = hrtfElevation1->kernelListL();
    HRTFKernelList* kernelListR1 = hrtfElevation1->kernelListR();
    HRTFKernelList* kernelListL2 = hrtfElevation2->kernelListL();
    HRTFKernelList* kernelListR2 = hrtfElevation2->kernelListR();

    // Interpolate kernels of corresponding azimuths of the two elevations.
    for (unsigned i = 0; i < NumberOfTotalAzimuths; ++i) {
        (*kernelListL)[i] = HRTFKernel::createInterpolatedKernel(kernelListL1->at(i).get(), kernelListL2->at(i).get(), x);
        (*kernelListR)[i] = HRTFKernel::createInterpolatedKernel(kernelListR1->at(i).get(), kernelListR2->at(i).get(), x);
    }

    // Interpolate elevation angle.
    double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();
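    // For example, x == 0.5 between slices at elevations 30 and 45 yields
    // angle == 37.5, which the static_cast below truncates to 37.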
    OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), static_cast<int>(angle), sampleRate));
    return hrtfElevation.release();
}

void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR)
{
    bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0;
    ASSERT(checkAzimuthBlend);
    if (!checkAzimuthBlend)
        azimuthBlend = 0.0;

    unsigned numKernels = m_kernelListL->size();

    bool isIndexGood = azimuthIndex < numKernels;
    ASSERT(isIndexGood);
    if (!isIndexGood) {
        kernelL = 0;
        kernelR = 0;
        return;
    }

    // Return the left and right kernels.
    kernelL = m_kernelListL->at(azimuthIndex).get();
    kernelR = m_kernelListR->at(azimuthIndex).get();

    frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
    frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();

    int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
    double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
    double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();

    // Linearly interpolate delays.
    frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
    frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)