/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "HRTFPanner.h"
#include "HRTFDatabaseLoader.h"
#include "FFTConvolver.h"
#include "HRTFDatabase.h"
#include "AudioBlock.h"
using namespace mozilla;
using dom::ChannelInterpretation;
namespace WebCore {
// The value of 2 milliseconds is larger than the largest delay which exists
// in any HRTFKernel from the default HRTFDatabase. The delay values used in
// pan() are asserted against this limit.
const float MaxDelayTimeSeconds = 0.002f;
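// At a 44.1 kHz sample rate this sizes the delay line in the constructor at
// 0.002 * 44100, i.e. roughly 88 frames.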
const int UninitializedAzimuth = -1;
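// Each ear has two convolvers (sets 1 and 2) so that the panner can
// cross-fade between two HRTF positions without resetting the convolution
// history.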
HRTFPanner::HRTFPanner(float sampleRate,
                       already_AddRefed<HRTFDatabaseLoader> databaseLoader)
    : m_databaseLoader(databaseLoader),
      m_sampleRate(sampleRate),
      m_crossfadeSelection(CrossfadeSelection1),
      m_azimuthIndex1(UninitializedAzimuth),
      m_azimuthIndex2(UninitializedAzimuth),
      // m_elevation1 and m_elevation2 are initialized in pan()
      m_crossfadeX(0),
      m_crossfadeIncr(0),
      m_convolverL1(HRTFElevation::fftSizeForSampleRate(sampleRate)),
      m_convolverR1(m_convolverL1.fftSize()),
      m_convolverL2(m_convolverL1.fftSize()),
      m_convolverR2(m_convolverL1.fftSize()),
      m_delayLine(MaxDelayTimeSeconds * sampleRate) {
  MOZ_ASSERT(m_databaseLoader);
  MOZ_COUNT_CTOR(HRTFPanner);
}

HRTFPanner::~HRTFPanner() { MOZ_COUNT_DTOR(HRTFPanner); }

size_t HRTFPanner::sizeOfIncludingThis(
    mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t amount = aMallocSizeOf(this);
  // NB: m_databaseLoader can be shared, so it is not measured here
  amount += m_convolverL1.sizeOfExcludingThis(aMallocSizeOf);
  amount += m_convolverR1.sizeOfExcludingThis(aMallocSizeOf);
  amount += m_convolverL2.sizeOfExcludingThis(aMallocSizeOf);
  amount += m_convolverR2.sizeOfExcludingThis(aMallocSizeOf);
  amount += m_delayLine.SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

void HRTFPanner::reset() {
  m_azimuthIndex1 = UninitializedAzimuth;
  m_azimuthIndex2 = UninitializedAzimuth;
  // m_elevation1 and m_elevation2 are initialized in pan()
  m_crossfadeSelection = CrossfadeSelection1;
  m_crossfadeX = 0.0f;
  m_crossfadeIncr = 0.0f;
  m_convolverL1.reset();
  m_convolverR1.reset();
  m_convolverL2.reset();
  m_convolverR2.reset();
  m_delayLine.Reset();
}

int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth,
                                                     double& azimuthBlend) {
  // Convert the azimuth angle from the range -180 -> +180 into the range 0 ->
  // 360. The azimuth index may then be calculated from this positive value.
  if (azimuth < 0) azimuth += 360.0;
  int numberOfAzimuths = HRTFDatabase::numberOfAzimuths();
  const double angleBetweenAzimuths = 360.0 / numberOfAzimuths;
  // Calculate the azimuth index and the blend (0 -> 1) for interpolation.
  double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths;
  int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat);
  azimuthBlend =
      desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex);
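  // For example, assuming the default database interpolates to 192 azimuth
  // positions (1.875 degrees apart), an azimuth of 10 degrees gives
  // desiredAzimuthIndexFloat = 5.33: index 5 with a blend of about 0.33.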
  // We don't immediately start using this azimuth index, but instead approach
  // this index from the last index we rendered at. This minimizes the clicks
  // and graininess for moving sources which occur otherwise.
  desiredAzimuthIndex = std::max(0, desiredAzimuthIndex);
  desiredAzimuthIndex = std::min(numberOfAzimuths - 1, desiredAzimuthIndex);
  return desiredAzimuthIndex;
}

void HRTFPanner::pan(double desiredAzimuth, double elevation,
                     const AudioBlock* inputBus, AudioBlock* outputBus) {
#ifdef DEBUG
  unsigned numInputChannels = inputBus->IsNull() ? 0 : inputBus->ChannelCount();
  MOZ_ASSERT(numInputChannels <= 2);
  MOZ_ASSERT(inputBus->GetDuration() == WEBAUDIO_BLOCK_SIZE);
#endif
  bool isOutputGood = outputBus && outputBus->ChannelCount() == 2 &&
                      outputBus->GetDuration() == WEBAUDIO_BLOCK_SIZE;
  MOZ_ASSERT(isOutputGood);
  if (!isOutputGood) {
    if (outputBus) outputBus->SetNull(outputBus->GetDuration());
    return;
  }
  HRTFDatabase* database = m_databaseLoader->database();
  if (!database) {  // not yet loaded
    outputBus->SetNull(outputBus->GetDuration());
    return;
  }
  // IRCAM HRTF azimuth values from the loaded database are reversed from the
  // panner's notion of azimuth.
  double azimuth = -desiredAzimuth;
  bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0;
  MOZ_ASSERT(isAzimuthGood);
  if (!isAzimuthGood) {
    outputBus->SetNull(outputBus->GetDuration());
    return;
  }
  // Normally, we'll just be dealing with mono sources.
  // If we have a stereo input, implement stereo panning with left source
  // processed by left HRTF, and right source by right HRTF.
  // Get destination pointers.
  float* destinationL =
      static_cast<float*>(const_cast<void*>(outputBus->mChannelData[0]));
  float* destinationR =
      static_cast<float*>(const_cast<void*>(outputBus->mChannelData[1]));
  double azimuthBlend;
  int desiredAzimuthIndex =
      calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);
  // Initially snap azimuth and elevation values to first values encountered.
  if (m_azimuthIndex1 == UninitializedAzimuth) {
    m_azimuthIndex1 = desiredAzimuthIndex;
    m_elevation1 = elevation;
  }
  if (m_azimuthIndex2 == UninitializedAzimuth) {
    m_azimuthIndex2 = desiredAzimuthIndex;
    m_elevation2 = elevation;
  }
  // Cross-fade / transition over a period of around 45 milliseconds.
  // This is an empirical value tuned to be a reasonable trade-off between
  // smoothness and speed.
  const double fadeFrames = sampleRate() <= 48000 ? 2048 : 4096;
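  // 2048 frames at 44.1 kHz is about 46 ms and 4096 frames at 96 kHz is
  // about 43 ms, so the fade spans roughly the same wall-clock time at
  // either rate.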
  // Check for azimuth and elevation changes, initiating a cross-fade if needed.
  if (m_crossfadeX == 0 && m_crossfadeSelection == CrossfadeSelection1) {
    if (desiredAzimuthIndex != m_azimuthIndex1 || elevation != m_elevation1) {
      // Cross-fade from 1 -> 2
      m_crossfadeIncr = 1 / fadeFrames;
      m_azimuthIndex2 = desiredAzimuthIndex;
      m_elevation2 = elevation;
    }
  }
  if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) {
    if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) {
      // Cross-fade from 2 -> 1
      m_crossfadeIncr = -1 / fadeFrames;
      m_azimuthIndex1 = desiredAzimuthIndex;
      m_elevation1 = elevation;
    }
  }
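  // A new target is latched only when no fade is in progress (m_crossfadeX
  // is exactly 0 or 1), so rapid azimuth/elevation changes are absorbed one
  // complete cross-fade at a time.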
  // Get the HRTFKernels and interpolated delays.
  HRTFKernel* kernelL1;
  HRTFKernel* kernelR1;
  HRTFKernel* kernelL2;
  HRTFKernel* kernelR2;
  double frameDelayL1;
  double frameDelayR1;
  double frameDelayL2;
  double frameDelayR2;
  database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex1,
                                           m_elevation1, kernelL1, kernelR1,
                                           frameDelayL1, frameDelayR1);
  database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex2,
                                           m_elevation2, kernelL2, kernelR2,
                                           frameDelayL2, frameDelayR2);
  bool areKernelsGood = kernelL1 && kernelR1 && kernelL2 && kernelR2;
  MOZ_ASSERT(areKernelsGood);
  if (!areKernelsGood) {
    outputBus->SetNull(outputBus->GetDuration());
    return;
  }
  MOZ_ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds &&
             frameDelayR1 / sampleRate() < MaxDelayTimeSeconds);
  MOZ_ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds &&
             frameDelayR2 / sampleRate() < MaxDelayTimeSeconds);
  // Crossfade inter-aural delays based on transitions.
  float frameDelaysL[WEBAUDIO_BLOCK_SIZE];
  float frameDelaysR[WEBAUDIO_BLOCK_SIZE];
  {
    float x = m_crossfadeX;
    float incr = m_crossfadeIncr;
    for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
      frameDelaysL[i] = (1 - x) * frameDelayL1 + x * frameDelayL2;
      frameDelaysR[i] = (1 - x) * frameDelayR1 + x * frameDelayR2;
      x += incr;
    }
  }
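  // The per-frame ramp above makes each ear's delay glide linearly from the
  // set-1 delay to the set-2 delay over the fade instead of jumping when the
  // azimuth index changes.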
  // First run through delay lines for inter-aural time difference.
  m_delayLine.Write(*inputBus);
  // "Speakers" means a mono input is read into both outputs (with possibly
  // different delays).
  m_delayLine.ReadChannel(frameDelaysL, outputBus, 0,
                          ChannelInterpretation::Speakers);
  m_delayLine.ReadChannel(frameDelaysR, outputBus, 1,
                          ChannelInterpretation::Speakers);
  m_delayLine.NextBlock();
  bool needsCrossfading = m_crossfadeIncr != 0;
  const float* convolutionDestinationL1;
  const float* convolutionDestinationR1;
  const float* convolutionDestinationL2;
  const float* convolutionDestinationR2;
  // Now do the convolutions.
  // Note that we avoid doing convolutions on both sets of convolvers if we're
  // not currently cross-fading.
  if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) {
    convolutionDestinationL1 =
        m_convolverL1.process(kernelL1->fftFrame(), destinationL);
    convolutionDestinationR1 =
        m_convolverR1.process(kernelR1->fftFrame(), destinationR);
  }
  if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) {
    convolutionDestinationL2 =
        m_convolverL2.process(kernelL2->fftFrame(), destinationL);
    convolutionDestinationR2 =
        m_convolverR2.process(kernelR2->fftFrame(), destinationR);
  }
  if (needsCrossfading) {
    // Apply linear cross-fade.
    float x = m_crossfadeX;
    float incr = m_crossfadeIncr;
    for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
      destinationL[i] = (1 - x) * convolutionDestinationL1[i] +
                        x * convolutionDestinationL2[i];
      destinationR[i] = (1 - x) * convolutionDestinationR1[i] +
                        x * convolutionDestinationR2[i];
      x += incr;
    }
    // Update cross-fade value from local.
    m_crossfadeX = x;
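    // Snap x exactly to an endpoint once it is within one increment of it;
    // comparing against the increment keeps floating-point accumulation from
    // overshooting the end of the fade.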
    if (m_crossfadeIncr > 0 && fabs(m_crossfadeX - 1) < m_crossfadeIncr) {
      // We've fully made the crossfade transition from 1 -> 2.
      m_crossfadeSelection = CrossfadeSelection2;
      m_crossfadeX = 1;
      m_crossfadeIncr = 0;
    } else if (m_crossfadeIncr < 0 && fabs(m_crossfadeX) < -m_crossfadeIncr) {
      // We've fully made the crossfade transition from 2 -> 1.
      m_crossfadeSelection = CrossfadeSelection1;
      m_crossfadeX = 0;
      m_crossfadeIncr = 0;
    }
  } else {
    const float* sourceL;
    const float* sourceR;
    if (m_crossfadeSelection == CrossfadeSelection1) {
      sourceL = convolutionDestinationL1;
      sourceR = convolutionDestinationR1;
    } else {
      sourceL = convolutionDestinationL2;
      sourceR = convolutionDestinationR2;
    }
    PodCopy(destinationL, sourceL, WEBAUDIO_BLOCK_SIZE);
    PodCopy(destinationR, sourceR, WEBAUDIO_BLOCK_SIZE);
  }
}

int HRTFPanner::maxTailFrames() const {
  // Although the ideal tail time would be the length of the impulse
  // response, there is additional tail time from the approximations in the
  // implementation.  Because HRTFPanner is implemented with a delay line
  // and an FFTConvolver, the tail time of the HRTFPanner is the sum of the
  // tail time of the delay line and the tail time of the FFTConvolver.  The
  // FFTs of the convolver are fftSize(), half of which is latency, but this
  // is aligned with blocks and so is reduced by the one block which is
  // processed immediately.
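  // For example, if fftSizeForSampleRate() chose a 512-point FFT, the
  // convolver term below would contribute 256 frames plus latencyFrames().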
  return m_delayLine.MaxDelayTicks() + m_convolverL1.fftSize() / 2 +
         m_convolverL1.latencyFrames();
}
}  // namespace WebCore