Skip to content
Permalink
Browse files
Multi-Channel support in AudioBufferSourceNode
https://bugs.webkit.org/show_bug.cgi?id=79202

Patch by Wei James <james.wei@intel.com> on 2012-02-27
Reviewed by Chris Rogers.

Source/WebCore:

Test: webaudio/audiobuffersource-multi-channels.html

* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
(WebCore::AudioBufferSourceNode::setBuffer):
* webaudio/AudioBufferSourceNode.h:
(AudioBufferSourceNode):

LayoutTests:

* webaudio/audiobuffersource-channels-expected.txt:
* webaudio/audiobuffersource-channels.html:
* webaudio/audiobuffersource-multi-channels-expected.wav: Added.
* webaudio/audiobuffersource-multi-channels.html: Added.

Canonical link: https://commits.webkit.org/96837@main
git-svn-id: https://svn.webkit.org/repository/webkit/trunk@109076 268f45cc-cd09-0410-ab3c-d52691b4dbfc
  • Loading branch information
Xiaohai Wei authored and webkit-commit-queue committed Feb 28, 2012
1 parent e0be41c commit 1ee035610dca7d40b6fba0f67203dac4c3d264c4
@@ -1,3 +1,15 @@
2012-02-27 Wei James <james.wei@intel.com>

Multi-Channel support in AudioBufferSourceNode
https://bugs.webkit.org/show_bug.cgi?id=79202

Reviewed by Chris Rogers.

* webaudio/audiobuffersource-channels-expected.txt:
* webaudio/audiobuffersource-channels.html:
* webaudio/audiobuffersource-multi-channels-expected.wav: Added.
* webaudio/audiobuffersource-multi-channels.html: Added.

2012-02-27 Emil A Eklund <eae@chromium.org>

Printed font-size should not be dependant on zoom level
@@ -5,13 +5,13 @@ On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE
PASS source.buffer = 57 threw exception TypeError: Value is not of type AudioBuffer.
PASS Mono buffer can be set.
PASS Stereo buffer can be set.
PASS 3 channel buffer is not settable.
PASS 4 channel buffer is not settable.
PASS 5 channel buffer is not settable.
PASS 6 channel buffer is not settable.
PASS 7 channel buffer is not settable.
PASS 8 channel buffer is not settable.
PASS 9 channel buffer is not settable.
PASS 3 channels buffer can be set.
PASS 4 channels buffer can be set.
PASS 5 channels buffer can be set.
PASS 6 channels buffer can be set.
PASS 7 channels buffer can be set.
PASS 8 channels buffer can be set.
PASS 9 channels buffer can be set.
PASS successfullyParsed is true

TEST COMPLETE
@@ -48,17 +48,16 @@
testFailed("Stereo buffer can not be set.");
}

// Check a few buffers with more than two channels and check for failure.
// (for now the implementation will only work with mono and stereo buffers)
// Check buffers with more than two channels.
for (var i = 3; i < 10; ++i) {
try {
var buffer = context.createBuffer(i, 1024, context.sampleRate);
source.buffer = buffer;
var message = i + " channel buffer should not be settable.";
testFailed(message);
} catch(e) {
var message = i + " channel buffer is not settable.";
var message = i + " channels buffer can be set.";
testPassed(message);
} catch(e) {
var message = i + " channels buffer can not be set.";
testFailed(message);
}
}

Binary file not shown.
@@ -0,0 +1,43 @@
<!DOCTYPE html>

<!--
Verifies that an AudioBufferSourceNode can play back a 5.1 (six-channel) buffer
through an offline rendering context.
-->

<html>
<head>
<script type="text/javascript" src="resources/audio-testing.js"></script>
<script type="text/javascript" src="resources/mix-testing.js"></script>
</head>
<body>

<script>

function runTest() {
    // Only meaningful under the layout-test harness, which captures the rendered audio.
    if (!window.layoutTestController)
        return;

    layoutTestController.waitUntilDone();
    window.jsTestAsync = true;

    // Offline context rendering six channels at the standard sample rate.
    var sampleRate = 44100.0;
    var numberOfChannels = 6;
    var offlineContext = new webkitAudioContext(numberOfChannels, sampleRate * renderLengthSeconds, sampleRate);

    // A 440 Hz tone written into every one of the six channels.
    var tone = createToneBuffer(offlineContext, 440, toneLengthSeconds, numberOfChannels);

    var bufferSource = offlineContext.createBufferSource();
    bufferSource.buffer = tone;
    bufferSource.connect(offlineContext.destination);
    bufferSource.noteOn(0);

    // finishAudioTest (audio-testing.js) notifies the harness when rendering completes.
    offlineContext.oncomplete = finishAudioTest;
    offlineContext.startRendering();
}

runTest();
</script>

</body>
</html>
@@ -1,3 +1,19 @@
2012-02-27 Wei James <james.wei@intel.com>

Multi-Channel support in AudioBufferSourceNode
https://bugs.webkit.org/show_bug.cgi?id=79202

Reviewed by Chris Rogers.

Test: webaudio/audiobuffersource-multi-channels.html

* webaudio/AudioBufferSourceNode.cpp:
(WebCore::AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping):
(WebCore::AudioBufferSourceNode::renderFromBuffer):
(WebCore::AudioBufferSourceNode::setBuffer):
* webaudio/AudioBufferSourceNode.h:
(AudioBufferSourceNode):

2012-02-27 Leo Yang <leo.yang@torchmobile.com.cn>

[BlackBerry] Upstream the BlackBerry change to platform/graphics/IntRect.h
@@ -126,6 +126,9 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
quantumFrameOffset = min(quantumFrameOffset, framesToProcess); // clamp to valid range
size_t bufferFramesToProcess = framesToProcess - quantumFrameOffset;

for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
m_destinationChannels[i] = outputBus->channel(i)->mutableData();

// Render by reading directly from the buffer.
renderFromBuffer(outputBus, quantumFrameOffset, bufferFramesToProcess);

@@ -144,7 +147,7 @@ void AudioBufferSourceNode::process(size_t framesToProcess)

if (isSafe) {
for (unsigned i = 0; i < outputBus->numberOfChannels(); ++i)
memset(outputBus->channel(i)->mutableData() + zeroStartFrame, 0, sizeof(float) * framesToZero);
memset(m_destinationChannels[i] + zeroStartFrame, 0, sizeof(float) * framesToZero);
}

m_isPlaying = false;
@@ -160,7 +163,7 @@ void AudioBufferSourceNode::process(size_t framesToProcess)
}

// Returns true if we're finished.
bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(float* destinationL, float* destinationR, size_t framesToProcess)
bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(AudioBus*, unsigned index, size_t framesToProcess)
{
if (!loop()) {
// If we're not looping, then stop playing when we get to the end.
@@ -169,10 +172,8 @@ bool AudioBufferSourceNode::renderSilenceAndFinishIfNotLooping(float* destinatio
if (framesToProcess > 0) {
// We're not looping and we've reached the end of the sample data, but we still need to provide more output,
// so generate silence for the remaining.
memset(destinationL, 0, sizeof(float) * framesToProcess);

if (destinationR)
memset(destinationR, 0, sizeof(float) * framesToProcess);
for (unsigned i = 0; i < numberOfChannels(); ++i)
memset(m_destinationChannels[i] + index, 0, sizeof(float) * framesToProcess);
}

finish();
@@ -194,21 +195,11 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
unsigned numberOfChannels = this->numberOfChannels();
unsigned busNumberOfChannels = bus->numberOfChannels();

// FIXME: we can add support for sources with more than two channels, but this is not a common case.
bool channelCountGood = numberOfChannels == busNumberOfChannels && (numberOfChannels == 1 || numberOfChannels == 2);
bool channelCountGood = numberOfChannels && numberOfChannels == busNumberOfChannels;
ASSERT(channelCountGood);
if (!channelCountGood)
return;

// Get the destination pointers.
float* destinationL = bus->channel(0)->mutableData();
ASSERT(destinationL);
if (!destinationL)
return;
float* destinationR = (numberOfChannels < 2) ? 0 : bus->channel(1)->mutableData();

bool isStereo = destinationR;

// Sanity check destinationFrameOffset, numberOfFrames.
size_t destinationLength = bus->length();

@@ -221,18 +212,15 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
ASSERT(isOffsetGood);
if (!isOffsetGood)
return;

// Potentially zero out initial frames leading up to the offset.
if (destinationFrameOffset) {
memset(destinationL, 0, sizeof(float) * destinationFrameOffset);
if (destinationR)
memset(destinationR, 0, sizeof(float) * destinationFrameOffset);
for (unsigned i = 0; i < numberOfChannels; ++i)
memset(m_destinationChannels[i], 0, sizeof(float) * destinationFrameOffset);
}

// Offset the pointers to the correct offset frame.
destinationL += destinationFrameOffset;
if (destinationR)
destinationR += destinationFrameOffset;
unsigned writeIndex = destinationFrameOffset;

size_t bufferLength = buffer()->length();
double bufferSampleRate = buffer()->sampleRate();
@@ -264,10 +252,6 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
if (m_virtualReadIndex >= endFrame)
m_virtualReadIndex = startFrame; // reset to start

// Get pointers to the start of the sample buffer.
float* sourceL = m_buffer->getChannelData(0)->data();
float* sourceR = m_buffer->numberOfChannels() == 2 ? m_buffer->getChannelData(1)->data() : 0;

double pitchRate = totalPitchRate();

// Get local copy.
@@ -276,6 +260,9 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
// Render loop - reading from the source buffer to the destination using linear interpolation.
int framesToProcess = numberOfFrames;

const float** sourceChannels = m_sourceChannels.get();
float** destinationChannels = m_destinationChannels.get();

// Optimize for the very common case of playing back with pitchRate == 1.
// We can avoid the linear interpolation.
if (pitchRate == 1 && virtualReadIndex == floor(virtualReadIndex)) {
@@ -285,20 +272,17 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
int framesThisTime = min(framesToProcess, framesToEnd);
framesThisTime = max(0, framesThisTime);

memcpy(destinationL, sourceL + readIndex, sizeof(*sourceL) * framesThisTime);
destinationL += framesThisTime;
if (isStereo) {
memcpy(destinationR, sourceR + readIndex, sizeof(*sourceR) * framesThisTime);
destinationR += framesThisTime;
}
for (unsigned i = 0; i < numberOfChannels; ++i)
memcpy(destinationChannels[i] + writeIndex, sourceChannels[i] + readIndex, sizeof(float) * framesThisTime);

writeIndex += framesThisTime;
readIndex += framesThisTime;
framesToProcess -= framesThisTime;

// Wrap-around.
if (readIndex >= endFrame) {
readIndex -= deltaFrames;
if (renderSilenceAndFinishIfNotLooping(destinationL, destinationR, framesToProcess))
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
}
}
@@ -324,25 +308,25 @@ void AudioBufferSourceNode::renderFromBuffer(AudioBus* bus, unsigned destination
break;

// Linear interpolation.
double sampleL1 = sourceL[readIndex];
double sampleL2 = sourceL[readIndex2];
double sampleL = (1.0 - interpolationFactor) * sampleL1 + interpolationFactor * sampleL2;
*destinationL++ = narrowPrecisionToFloat(sampleL);

if (isStereo) {
double sampleR1 = sourceR[readIndex];
double sampleR2 = sourceR[readIndex2];
double sampleR = (1.0 - interpolationFactor) * sampleR1 + interpolationFactor * sampleR2;
*destinationR++ = narrowPrecisionToFloat(sampleR);
for (unsigned i = 0; i < numberOfChannels; ++i) {
float* destination = destinationChannels[i];
const float* source = sourceChannels[i];

double sample1 = source[readIndex];
double sample2 = source[readIndex2];
double sample = (1.0 - interpolationFactor) * sample1 + interpolationFactor * sample2;

destination[writeIndex] = narrowPrecisionToFloat(sample);
}
writeIndex++;

virtualReadIndex += pitchRate;

// Wrap-around, retaining sub-sample position since virtualReadIndex is floating-point.
if (virtualReadIndex >= endFrame) {
virtualReadIndex -= deltaFrames;

if (renderSilenceAndFinishIfNotLooping(destinationL, destinationR, framesToProcess))
if (renderSilenceAndFinishIfNotLooping(bus, writeIndex, framesToProcess))
break;
}
}
@@ -379,11 +363,13 @@ bool AudioBufferSourceNode::setBuffer(AudioBuffer* buffer)
if (buffer) {
// Do any necessary re-configuration to the buffer's number of channels.
unsigned numberOfChannels = buffer->numberOfChannels();
if (!numberOfChannels || numberOfChannels > 2) {
// FIXME: implement multi-channel greater than stereo.
return false;
}
output(0)->setNumberOfChannels(numberOfChannels);

m_sourceChannels = adoptArrayPtr(new const float* [numberOfChannels]);
m_destinationChannels = adoptArrayPtr(new float* [numberOfChannels]);

for (unsigned i = 0; i < numberOfChannels; ++i)
m_sourceChannels[i] = buffer->getChannelData(i)->data();
}

m_virtualReadIndex = 0;
@@ -30,6 +30,7 @@
#include "AudioGain.h"
#include "AudioPannerNode.h"
#include "AudioSourceNode.h"
#include <wtf/OwnArrayPtr.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/Threading.h>
@@ -87,11 +88,16 @@ class AudioBufferSourceNode : public AudioSourceNode {

void renderFromBuffer(AudioBus*, unsigned destinationFrameOffset, size_t numberOfFrames);

inline bool renderSilenceAndFinishIfNotLooping(float* destinationL, float* destinationR, size_t framesToProcess);
// Render silence starting from "index" frame in AudioBus.
inline bool renderSilenceAndFinishIfNotLooping(AudioBus*, unsigned index, size_t framesToProcess);

// m_buffer holds the sample data which this node outputs.
RefPtr<AudioBuffer> m_buffer;

// Pointers for the buffer and destination.
OwnArrayPtr<const float*> m_sourceChannels;
OwnArrayPtr<float*> m_destinationChannels;

// Used for the "gain" and "playbackRate" attributes.
RefPtr<AudioGain> m_gain;
RefPtr<AudioParam> m_playbackRate;

0 comments on commit 1ee0356

Please sign in to comment.