// m_fftFrame is the frequency-domain representation of the impulse response with the delay removed
// m_frameDelay is the leading delay of the original impulse response.
// Factory for HRTFKernel: adopts ownership of |fftFrame| (PassOwnPtr transfer)
// and returns a ref-counted kernel via adoptRef().
// NOTE(review): the closing brace of this function is missing in this excerpt —
// likely an extraction artifact; confirm against the full header.
static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
{
return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
// Size of the FFT backing this kernel's frequency-domain data
// (delegates to the owned FFTFrame).
size_t fftSize() const
{
    return m_fftFrame->fftSize();
}
// Leading delay of the original impulse response (the delay that was
// removed before building m_fftFrame).
float frameDelay() const
{
    return m_frameDelay;
}
// Private constructor used by create(): adopts ownership of |fftFrame|.
// NOTE(review): the init list appears truncated in this excerpt — |sampleRate|
// is accepted but no corresponding member initialization is visible; confirm
// against the full header.
HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
: m_fftFrame(fftFrame)
, m_frameDelay(frameDelay)
// Owned frequency-domain kernel data; the leading delay has been removed from it.
OwnPtr<FFTFrame> m_fftFrame;
// Leading delay of the original impulse response (see extractAverageGroupDelay).
float m_frameDelay;
// Tail of extractAverageGroupDelay() (signature elided in this excerpt):
// the average group delay measured from |estimationFrame|, narrowed from
// double to float for storage.
float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
return frameDelay;
// Builds a kernel from a time-domain impulse response channel; the body is
// heavily elided in this excerpt.
HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate, bool bassBoost)
: m_frameDelay(0)
// Determine the leading delay (average group delay) for the response.
m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);
// Add leading delay back in.
// NOTE(review): the two lines below reintroduce the delay into an |fftFrame|
// local — they likely belong to a different member function (impulse-response
// reconstruction), spliced here by extraction; confirm against the full file.
fftFrame.addConstantGroupDelay(m_frameDelay);
// Fragment of kernel interpolation (enclosing signature elided): blend two
// kernels by weight |x| — linearly interpolate their frame delays, interpolate
// the frequency-domain data via FFTFrame, and wrap the result in a new kernel.
// NOTE(review): presumably x is in [0, 1] and both kernels share sampleRate1 —
// confirm against the full function, which likely validates these.
float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
// Ownership of the interpolated frame is transferred into the new kernel.
return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
// Returns (via out-params) the left/right-ear kernels at |azimuthIndex| and
// frame delays linearly interpolated toward the next azimuth by |azimuthBlend|.
// NOTE(review): the body is elided in this excerpt — |numKernels| is used but
// not declared here, and kernelL/kernelR are never assigned in the visible
// lines; consult the full HRTFElevation.cpp.
void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR)
// Delays at the requested azimuth.
frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();
// Neighboring azimuth, wrapping around at the end of the kernel list.
int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();
// Linearly interpolate delays.
frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
}