Skip to content

Commit

Permalink
"A MediaStreamTrack ended due to a capture failure" when selecting bluetooth headphones as audio input device
Browse files Browse the repository at this point in the history

https://bugs.webkit.org/show_bug.cgi?id=247119
rdar://problem/101628857

Reviewed by Eric Carlson.

Replace the boolean used to trigger a sample buffer update with a minimum number of buffer sample frames, which is set when render fails.
This minimum number of buffer sample frames is used to compute a minimum buffer size when setting up the audio unit.

Update Mock implementation to cover that case.
We do this by triggering this code path when triggerMockMicrophoneConfigurationChange is called.
We also update the capture verification timer so that the timer for the mock unit is reduced to 1 second.

Update LayoutTests/fast/mediastream/mediastreamtrack-configurationchange.html to cover that case.

* LayoutTests/fast/mediastream/mediastreamtrack-configurationchange.html:
* Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.cpp:
(WebCore::CoreAudioSharedUnit::configureMicrophoneProc):
(WebCore::CoreAudioSharedUnit::processMicrophoneSamples):
(WebCore::CoreAudioSharedUnit::startInternal):
(WebCore::CoreAudioSharedUnit::isProducingMicrophoneSamplesChanged):
* Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.h:
* Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.h:
* Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.mm:
(WebCore::MockAudioSharedUnit::singleton):
(WebCore::MockAudioSharedUnit::increaseBufferSize):
(WebCore::MockAudioSharedInternalUnit::emitSampleBuffers):
(WebCore::MockAudioSharedInternalUnit::render):
* Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.cpp:
(WebCore::MockRealtimeMediaSourceCenter::triggerMockMicrophoneConfigurationChange):

Canonical link: https://commits.webkit.org/256091@main
  • Loading branch information
youennf committed Oct 28, 2022
1 parent 5550675 commit b818b25
Show file tree
Hide file tree
Showing 6 changed files with 43 additions and 14 deletions.
Expand Up @@ -29,6 +29,9 @@
});

assert_equals(track.label, "Mock audio device 2");

await new Promise(resolve => setTimeout(resolve, 2000));
assert_equals(track.readyState, "live");
}, "Trigger configurationchange event in case OS changes microphone on its own");
</script>
</body>
Expand Down
27 changes: 17 additions & 10 deletions Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.cpp
Expand Up @@ -343,8 +343,14 @@ OSStatus CoreAudioSharedUnit::configureMicrophoneProc(int sampleRate)
return err;
}

m_shouldUpdateMicrophoneSampleBufferSize = false;
m_microphoneSampleBuffer = AudioSampleBufferList::create(microphoneProcFormat, preferredIOBufferSize() * 2);
auto bufferSize = preferredIOBufferSize();
if (m_minimumMicrophoneSampleFrames) {
auto minBufferSize = *m_minimumMicrophoneSampleFrames * microphoneProcFormat.mBytesPerPacket;
if (minBufferSize > bufferSize)
bufferSize = minBufferSize;
m_minimumMicrophoneSampleFrames = { };
}
m_microphoneSampleBuffer = AudioSampleBufferList::create(microphoneProcFormat, bufferSize * 2);
m_microphoneProcFormat = microphoneProcFormat;

return noErr;
Expand Down Expand Up @@ -433,8 +439,6 @@ OSStatus CoreAudioSharedUnit::speakerCallback(void *inRefCon, AudioUnitRenderAct

OSStatus CoreAudioSharedUnit::processMicrophoneSamples(AudioUnitRenderActionFlags& ioActionFlags, const AudioTimeStamp& timeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList* /*ioData*/)
{
++m_microphoneProcsCalled;

if (m_isReconfiguring)
return false;

Expand All @@ -443,16 +447,19 @@ OSStatus CoreAudioSharedUnit::processMicrophoneSamples(AudioUnitRenderActionFlag
AudioBufferList& bufferList = m_microphoneSampleBuffer->bufferList();
if (auto err = m_ioUnit->render(&ioActionFlags, &timeStamp, inBusNumber, inNumberFrames, &bufferList)) {
RELEASE_LOG_ERROR(WebRTC, "CoreAudioSharedUnit::processMicrophoneSamples(%p) AudioUnitRender failed with error %d (%.4s), bufferList size %d, inNumberFrames %d ", this, (int)err, (char*)&err, (int)bufferList.mBuffers[0].mDataByteSize, (int)inNumberFrames);
if (err == kAudio_ParamError && !m_shouldUpdateMicrophoneSampleBufferSize) {
m_shouldUpdateMicrophoneSampleBufferSize = true;
if (err == kAudio_ParamError && !m_minimumMicrophoneSampleFrames) {
m_minimumMicrophoneSampleFrames = inNumberFrames;
// Our buffer might be too small, the preferred buffer size or sample rate might have changed.
callOnMainThread([] {
CoreAudioSharedUnit::singleton().reconfigure();
callOnMainThread([weakThis = WeakPtr { *this }] {
if (weakThis)
weakThis->reconfigure();
});
}
return err;
}

++m_microphoneProcsCalled;

if (!isProducingMicrophoneSamples())
return noErr;

Expand Down Expand Up @@ -563,7 +570,7 @@ OSStatus CoreAudioSharedUnit::startInternal()

m_ioUnitStarted = true;

m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
m_verifyCapturingTimer.startRepeating(m_ioUnit->verifyCaptureInterval(isProducingMicrophoneSamples()));
m_microphoneProcsCalled = 0;
m_microphoneProcsCalledLastTime = 0;

Expand All @@ -574,7 +581,7 @@ void CoreAudioSharedUnit::isProducingMicrophoneSamplesChanged()
{
if (!isProducingData())
return;
m_verifyCapturingTimer.startRepeating(verifyCaptureInterval());
m_verifyCapturingTimer.startRepeating(m_ioUnit->verifyCaptureInterval(isProducingMicrophoneSamples()));
}

void CoreAudioSharedUnit::validateOutputDevice(uint32_t currentOutputDeviceID)
Expand Down
5 changes: 2 additions & 3 deletions Source/WebCore/platform/mediastream/mac/CoreAudioSharedUnit.h
Expand Up @@ -63,6 +63,7 @@ class CoreAudioSharedUnit final : public BaseAudioSharedUnit {
virtual OSStatus defaultInputDevice(uint32_t*) = 0;
virtual OSStatus defaultOutputDevice(uint32_t*) = 0;
virtual void delaySamples(Seconds) { }
virtual Seconds verifyCaptureInterval(bool isProducingSamples) const { return isProducingSamples ? 10_s : 2_s; }
};

WEBCORE_EXPORT static CoreAudioSharedUnit& unit();
Expand Down Expand Up @@ -120,8 +121,6 @@ class CoreAudioSharedUnit final : public BaseAudioSharedUnit {

void verifyIsCapturing();

Seconds verifyCaptureInterval() { return isProducingMicrophoneSamples() ? 10_s : 2_s; }

CreationCallback m_creationCallback;
GetSampleRateCallback m_getSampleRateCallback;
std::unique_ptr<InternalUnit> m_ioUnit;
Expand Down Expand Up @@ -155,7 +154,7 @@ class CoreAudioSharedUnit final : public BaseAudioSharedUnit {
uint64_t m_microphoneProcsCalledLastTime { 0 };
Timer m_verifyCapturingTimer;

bool m_shouldUpdateMicrophoneSampleBufferSize { false };
std::optional<size_t> m_minimumMicrophoneSampleFrames;
bool m_isReconfiguring { false };
bool m_shouldNotifySpeakerSamplesProducer { false };
bool m_hasNotifiedSpeakerSamplesProducer { false };
Expand Down
Expand Up @@ -34,6 +34,7 @@ namespace WebCore {
namespace MockAudioSharedUnit {

CoreAudioSharedUnit& singleton();
void increaseBufferSize();

}

Expand Down
20 changes: 19 additions & 1 deletion Source/WebCore/platform/mediastream/mac/MockAudioSharedUnit.mm
Expand Up @@ -125,6 +125,7 @@ static void addHum(float amplitude, float frequency, float sampleRate, uint64_t
OSStatus defaultInputDevice(uint32_t*) final;
OSStatus defaultOutputDevice(uint32_t*) final;
void delaySamples(Seconds) final;
Seconds verifyCaptureInterval(bool) const final { return 1_s; }

int sampleRate() const { return m_streamFormat.mSampleRate; }
void tick();
Expand Down Expand Up @@ -160,11 +161,13 @@ static void addHum(float amplitude, float frequency, float sampleRate, uint64_t
AURenderCallbackStruct m_speakerCallback;
};

static bool s_shouldIncreaseBufferSize;
CoreAudioSharedUnit& MockAudioSharedUnit::singleton()
{
static NeverDestroyed<CoreAudioSharedUnit> unit;
static std::once_flag onceFlag;
std::call_once(onceFlag, [&] () {
s_shouldIncreaseBufferSize = false;
unit->setSampleRateRange(CapabilityValueOrRange(44100, 48000));
unit->setInternalUnitCreationCallback([] {
UniqueRef<CoreAudioSharedUnit::InternalUnit> result = makeUniqueRef<MockAudioSharedInternalUnit>();
Expand All @@ -175,6 +178,11 @@ static void addHum(float amplitude, float frequency, float sampleRate, uint64_t
return unit;
}

// Arms the mock unit to simulate a too-small microphone sample buffer:
// while s_shouldIncreaseBufferSize is set, the mock internal unit exposes a
// larger frame count to the input proc and its render() returns
// kAudio_ParamError until the destination buffer is big enough, which
// exercises the shared unit's buffer-growth/reconfigure path.
// Called from MockRealtimeMediaSourceCenter::triggerMockMicrophoneConfigurationChange().
void MockAudioSharedUnit::increaseBufferSize()
{
s_shouldIncreaseBufferSize = true;
}

static AudioStreamBasicDescription createAudioFormat(Float64 sampleRate, UInt32 channelCount)
{
AudioStreamBasicDescription format;
Expand Down Expand Up @@ -292,8 +300,10 @@ static AudioStreamBasicDescription createAudioFormat(Float64 sampleRate, UInt32
memset(&timeStamp, 0, sizeof(AudioTimeStamp));
timeStamp.mSampleTime = sampleTime;
timeStamp.mHostTime = static_cast<UInt64>(sampleTime);

auto exposedFrameCount = s_shouldIncreaseBufferSize ? 10 * frameCount : frameCount;
if (m_microphoneCallback.inputProc)
m_microphoneCallback.inputProc(m_microphoneCallback.inputProcRefCon, &ioActionFlags, &timeStamp, 1, frameCount, bufferList);
m_microphoneCallback.inputProc(m_microphoneCallback.inputProcRefCon, &ioActionFlags, &timeStamp, 1, exposedFrameCount, nullptr);

ioActionFlags = 0;
if (m_speakerCallback.inputProc)
Expand Down Expand Up @@ -340,6 +350,14 @@ static AudioStreamBasicDescription createAudioFormat(Float64 sampleRate, UInt32

OSStatus MockAudioSharedInternalUnit::render(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32, UInt32 frameCount, AudioBufferList* buffer)
{
if (s_shouldIncreaseBufferSize) {
auto copySize = frameCount * m_streamFormat.mBytesPerPacket;
if (buffer->mNumberBuffers && copySize <= buffer->mBuffers[0].mDataByteSize)
s_shouldIncreaseBufferSize = false;
// We still return an error in case s_shouldIncreaseBufferSize is false since we do not have enough data to write.
return kAudio_ParamError;
}

auto* sourceBuffer = m_audioBufferList->list();
if (buffer->mNumberBuffers > sourceBuffer->mNumberBuffers)
return kAudio_ParamError;
Expand Down
Expand Up @@ -326,6 +326,7 @@ void MockRealtimeMediaSourceCenter::triggerMockMicrophoneConfigurationChange()
auto devices = audioCaptureDeviceManager().captureDevices();
if (devices.size() <= 1)
return;
MockAudioSharedUnit::increaseBufferSize();
MockAudioSharedUnit::singleton().handleNewCurrentMicrophoneDevice(WTFMove(devices[1]));
#endif
}
Expand Down

0 comments on commit b818b25

Please sign in to comment.