From cfbbbcfa2c139fd4c93a18fbfac89101bb8d65f3 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 21 Aug 2025 09:55:13 -0700 Subject: [PATCH 01/14] [WebRTC] Rework device handling sequence so that we can handle unplugging/re-plugging devices The device handling was not processing device updates in the proper sequence, as things like AEC use both input and output devices. Devices like headsets are both, so unplugging them resulted in various mute conditions and sometimes even a crash. Now, we update both capture and render devices at once in the proper sequence. Test Guidance: * Bring two users in the same place in webrtc regions. * The 'listening' one should have a headset or something set as 'Default'. * Press 'talk' on one, and verify the other can hear. * Unplug the headset from the listening one. * Validate that audio changes from the headset to the speakers. * Plug the headset back in. * Validate that audio changes from speakers to headset. * Do the same type of test with the headset viewer talking. * The microphone used should switch from the headset to the computer's built-in microphone (it should have one). Do various other device tests, such as setting devices explicitly, messing with the device selector, etc. --- indra/llwebrtc/llwebrtc.cpp | 209 ++++++++++++-------------------- indra/llwebrtc/llwebrtc_impl.h | 1 + indra/newview/llvoicewebrtc.cpp | 35 ++++-- 3 files changed, 106 insertions(+), 139 deletions(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 20951ff8167..5e937270dc0 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -39,11 +39,14 @@ namespace llwebrtc { +#if WEBRTC_WIN +static int16_t PLAYOUT_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +static int16_t RECORD_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice; +#else +static int16_t PLAYOUT_DEVICE_DEFAULT = 0; +static int16_t RECORD_DEVICE_DEFAULT = 0; +#endif -static int16_t PLAYOUT_DEVICE_DEFAULT = -1; -static int16_t PLAYOUT_DEVICE_BAD = -2; -static int16_t RECORD_DEVICE_DEFAULT = -1; -static int16_t RECORD_DEVICE_BAD = -2; LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} @@ -416,29 +419,37 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) void ll_set_device_module_capture_device(rtc::scoped_refptr device_module, int16_t device) { -#if WEBRTC_WIN - if (device < 0) - { - device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice); - } - else - { - device_module->SetRecordingDevice(device); - } -#else - // passed in default is -1, but the device list - // has it at 0 - device_module->SetRecordingDevice(device + 1); -#endif + device_module->SetRecordingDevice(device); device_module->InitMicrophone(); + device_module->SetStereoRecording(false); + device_module->InitRecording(); +} + +void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device) +{ + device_module->SetPlayoutDevice(device); + device_module->InitSpeaker(); + device_module->SetStereoPlayout(true); + device_module->InitPlayout(); } void LLWebRTCImpl::setCaptureDevice(const std::string &id) { int16_t recordingDevice = RECORD_DEVICE_DEFAULT; +#if WEBRTC_WIN + int16_t device_start = 0; +#else + if (mRecordingDeviceList.size()) + { + // no recording devices + return; + } + int16_t device_start = 1; +#endif + if (id != "Default") { - for (int16_t i = 0; i < mRecordingDeviceList.size(); i++) + for (int16_t i = device_start; i < mRecordingDeviceList.size(); i++) { if 
(mRecordingDeviceList[i].mID == id) { @@ -447,62 +458,27 @@ void LLWebRTCImpl::setCaptureDevice(const std::string &id) } } } - if (recordingDevice == mRecordingDevice) - { - return; - } + mRecordingDevice = recordingDevice; - if (mTuningMode) - { - mWorkerThread->PostTask([this, recordingDevice]() - { - ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice); - }); - } - else - { - mWorkerThread->PostTask([this, recordingDevice]() - { - bool recording = mPeerDeviceModule->Recording(); - if (recording) - { - mPeerDeviceModule->StopRecording(); - } - ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice); - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - }); - } + deployDevices(); } - -void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device) +void LLWebRTCImpl::setRenderDevice(const std::string &id) { + int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; #if WEBRTC_WIN - if (device < 0) - { - device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice); - } - else + int16_t device_start = 0; +#else + if (mPlayoutDeviceList.size()) { - device_module->SetPlayoutDevice(device); + // no playout devices + return; } -#else - device_module->SetPlayoutDevice(device + 1); + int16_t device_start = 1; #endif - device_module->InitSpeaker(); -} - -void LLWebRTCImpl::setRenderDevice(const std::string &id) -{ - int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; if (id != "Default") { - for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++) + for (int16_t i = device_start; i < mPlayoutDeviceList.size(); i++) { if (mPlayoutDeviceList[i].mID == id) { @@ -511,39 +487,8 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id) } } } - if (playoutDevice == mPlayoutDevice) - { - return; - } mPlayoutDevice = playoutDevice; - - if (mTuningMode) - { - mWorkerThread->PostTask( - [this, playoutDevice]() - { - ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice); - }); - } - else - { - mWorkerThread->PostTask( - [this, playoutDevice]() - { - bool playing = mPeerDeviceModule->Playing(); - if (playing) - { - mPeerDeviceModule->StopPlayout(); - } - ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice); - if (playing) - { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); - } - }); - } + deployDevices(); } // updateDevices needs to happen on the worker thread. 
@@ -593,9 +538,8 @@ void LLWebRTCImpl::updateDevices() void LLWebRTCImpl::OnDevicesUpdated() { - // reset these to a bad value so an update is forced - mRecordingDevice = RECORD_DEVICE_BAD; - mPlayoutDevice = PLAYOUT_DEVICE_BAD; + mRecordingDevice = RECORD_DEVICE_DEFAULT; + mPlayoutDevice = PLAYOUT_DEVICE_DEFAULT; updateDevices(); } @@ -604,52 +548,55 @@ void LLWebRTCImpl::OnDevicesUpdated() void LLWebRTCImpl::setTuningMode(bool enable) { mTuningMode = enable; + deployDevices(); +} + +void LLWebRTCImpl::deployDevices() +{ mWorkerThread->PostTask( - [this, enable] { - if (enable) + [this] { + if (mTuningMode) { - mPeerDeviceModule->StopRecording(); mPeerDeviceModule->StopPlayout(); + mPeerDeviceModule->StopRecording(); + mTuningDeviceModule->StopPlayout(); + mTuningDeviceModule->StopRecording(); ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); - mTuningDeviceModule->InitPlayout(); - mTuningDeviceModule->InitRecording(); mTuningDeviceModule->StartRecording(); // TODO: Starting Playout on the TDM appears to create an audio artifact (click) // in this case, so disabling it for now. We may have to do something different // if we enable 'echo playback' via the TDM when tuning. + //mTuningDeviceModule->InitPlayout(); //mTuningDeviceModule->StartPlayout(); } else { + mTuningDeviceModule->StopPlayout(); mTuningDeviceModule->StopRecording(); - //mTuningDeviceModule->StopPlayout(); + mPeerDeviceModule->StopPlayout(); + mPeerDeviceModule->StopRecording(); ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->InitRecording(); mPeerDeviceModule->StartPlayout(); mPeerDeviceModule->StartRecording(); } - } - ); - mSignalingThread->PostTask( - [this, enable] - { - for (auto &connection : mPeerConnections) - { - if (enable) - { - connection->enableSenderTracks(false); - } - else + mSignalingThread->PostTask( + [this] { - connection->resetMute(); - } - connection->enableReceiverTracks(!enable); - } + for (auto &connection : mPeerConnections) + { + if (mTuningMode) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!mTuningMode); + } + }); }); } @@ -951,10 +898,12 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) void LLWebRTCPeerConnectionImpl::setMute(bool mute) { EMicMuteState new_state = mute ? MUTE_MUTED : MUTE_UNMUTED; - if (new_state == mMute) - { - return; // no change - } + + // even if mute hasn't changed, we still need to update the mute + // state on the connections to handle cases where the 'Default' device + // has changed in the OS (unplugged headset, etc.) which messes + // with the mute state. 
+ bool force_reset = mMute == MUTE_INITIAL && mute; bool enable = !mute; mMute = new_state; diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index b6294dbd4a5..f379daa7d74 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -299,6 +299,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS // Devices void updateDevices(); + void deployDevices(); rtc::scoped_refptr mTuningDeviceModule; rtc::scoped_refptr mPeerDeviceModule; std::vector mVoiceDevicesObserverList; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index 34f3e22182f..6e00a2dfb44 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -695,21 +695,38 @@ void LLWebRTCVoiceClient::OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDevi std::string outputDevice = gSavedSettings.getString("VoiceOutputAudioDevice"); LL_DEBUGS("Voice") << "Setting devices to-input: '" << inputDevice << "' output: '" << outputDevice << "'" << LL_ENDL; - clearRenderDevices(); - for (auto &device : render_devices) + + // only set the render device if the device list has changed. + if (mRenderDevices.size() != render_devices.size() || !std::equal(mRenderDevices.begin(), + mRenderDevices.end(), + render_devices.begin(), + [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) { + return a.display_name == b.mDisplayName && a.full_name == b.mID; })) { - addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + clearRenderDevices(); + for (auto& device : render_devices) + { + addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setRenderDevice(outputDevice); } - setRenderDevice(outputDevice); - clearCaptureDevices(); - for (auto &device : capture_devices) + // only set the capture device if the device list has changed. 
+ if (mCaptureDevices.size() != capture_devices.size() ||!std::equal(mCaptureDevices.begin(), + mCaptureDevices.end(), + capture_devices.begin(), + [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) + { return a.display_name == b.mDisplayName && a.full_name == b.mID; })) { - LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL; + clearCaptureDevices(); + for (auto& device : capture_devices) + { + LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL; - addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID)); + } + setCaptureDevice(inputDevice); } - setCaptureDevice(inputDevice); setDevicesListUpdated(true); } From 1519e48c4b8297ae0f1360d8da85334ea927f6d4 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 21 Aug 2025 11:30:58 -0700 Subject: [PATCH 02/14] Fix race condition when multiple change device requests might come in at once --- indra/llwebrtc/llwebrtc.cpp | 7 +++++++ indra/llwebrtc/llwebrtc_impl.h | 1 + 2 files changed, 8 insertions(+) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 5e937270dc0..dff508c6a3d 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -162,6 +162,7 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mPeerCustomProcessor(nullptr), mMute(true), mTuningMode(false), + mDevicesDeploying(false), mPlayoutDevice(0), mRecordingDevice(0), mTuningAudioDeviceObserver(nullptr) @@ -553,6 +554,11 @@ void LLWebRTCImpl::setTuningMode(bool enable) void LLWebRTCImpl::deployDevices() { + if (mDevicesDeploying) + { + return; + } + mDevicesDeploying = true; mWorkerThread->PostTask( [this] { if (mTuningMode) @@ -596,6 +602,7 @@ void LLWebRTCImpl::deployDevices() } connection->enableReceiverTracks(!mTuningMode); } + mDevicesDeploying = false; }); }); } diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index f379daa7d74..f4841b9cbdf 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -300,6 +300,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS // Devices void updateDevices(); void deployDevices(); + bool mDevicesDeploying; rtc::scoped_refptr mTuningDeviceModule; rtc::scoped_refptr mPeerDeviceModule; std::vector mVoiceDevicesObserverList; From ac898675ad2817fd664b0022e3a04df746d3789b Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Sun, 31 Aug 2025 14:36:12 -0700 Subject: [PATCH 03/14] Update to m137 The primary feature of this commit is to update libwebrtc from m114 to m137. This is needed to make webrtc buildable, as m114 is not buildable by the current toolset. m137 had some changes to the API, which required renaming or changing namespace of some of the calls. Additionally, this PR moves from a callback mechanism for gathering the energy levels for tuning to a wrapper AudioDeviceModule, which gives us more control over the audio stream. Finally, the new m137-based webrtc has been updated to allow for 192khz audio streams. 
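To make the energy-gathering change described above concrete: the new code wraps the real AudioDeviceModule and interposes an AudioTransport proxy that forwards capture data to the engine while measuring a smoothed RMS level for the tuning meter. The standalone sketch below distills just that smoothing math as it appears in the hunks that follow; the class and method names (MicEnergyMeter, onCaptureFrame) are illustrative only and are not part of the actual llwebrtc API.

#include <cmath>
#include <cstddef>
#include <cstdint>

// Sliding-window RMS "microphone energy" meter, mirroring the smoothing used by
// LLWebRTCAudioTransport/LLCustomProcessor in the patch. Names are illustrative.
class MicEnergyMeter
{
public:
    // Feed one capture frame of interleaved 16-bit PCM samples.
    void onCaptureFrame(const int16_t* samples, size_t num_samples, size_t num_channels)
    {
        // Per-frame energy: sum of squared samples normalized to [-1, 1].
        float energy = 0.0f;
        for (size_t i = 0; i < num_samples * num_channels; ++i)
        {
            float s = static_cast<float>(samples[i]) / 32767.0f;
            energy += s * s;
        }

        // Slide a window of the last NUM_PACKETS_TO_FILTER frame energies
        // (~300 ms at 10 ms per frame) and derive an RMS value from their sum.
        float totalSum = 0.0f;
        for (int i = 0; i < NUM_PACKETS_TO_FILTER - 1; ++i)
        {
            mSumVector[i] = mSumVector[i + 1];
            totalSum += mSumVector[i];
        }
        mSumVector[NUM_PACKETS_TO_FILTER - 1] = energy;
        totalSum += energy;
        mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * NUM_PACKETS_TO_FILTER));
    }

    float microphoneEnergy() const { return mMicrophoneEnergy; }

private:
    static const int NUM_PACKETS_TO_FILTER = 30; // ~300 ms of smoothing
    float mSumVector[NUM_PACKETS_TO_FILTER] = {0};
    float mMicrophoneEnergy = 0.0f;
};

The tuning meter then reports this energy on a decibel-style scale as -20 * log10f(energy), which is what getTuningAudioLevel() does in the patch.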
--- autobuild.xml | 14 +- indra/llwebrtc/CMakeLists.txt | 2 +- indra/llwebrtc/llwebrtc.cpp | 439 +++++++++++++++++---------------- indra/llwebrtc/llwebrtc_impl.h | 354 ++++++++++++++++++++------ 4 files changed, 511 insertions(+), 298 deletions(-) diff --git a/autobuild.xml b/autobuild.xml index d1d2f735aff..6cd8b3e60a5 100644 --- a/autobuild.xml +++ b/autobuild.xml @@ -2717,11 +2717,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 6314fdcee81a3538a7d960178ade66301c2fa002 + e41e3a4e9e07bcbf553e725eb468ba1c1943abfa hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-darwin64-11958809572.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-darwin64-17354044714.tar.zst name darwin64 @@ -2731,11 +2731,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 95d7730a3d6955697e043f3fdf20ebdcc0c71fc0 + 586254d0c87cdaf9bb379ad36f161d42c499cfb3 hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-linux64-11958809572.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-linux64-17354044714.tar.zst name linux64 @@ -2745,11 +2745,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - c7b329d6409576af6eb5b80655b007f52639c43b + 3159f7e003c98a6ba3e286bb5f716a2127fe9843 hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m114.5735.08.73-alpha/webrtc-m114.5735.08.73-alpha.11958809572-windows64-11958809572.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-windows64-17354044714.tar.zst name windows64 @@ -2762,7 +2762,7 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors copyright Copyright (c) 2011, The WebRTC project authors. All rights reserved. version - m114.5735.08.73-alpha.11958809572 + m137.7151.04.9.17354044714 name webrtc vcs_branch diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt index 4fde489942c..eb10f4eee49 100644 --- a/indra/llwebrtc/CMakeLists.txt +++ b/indra/llwebrtc/CMakeLists.txt @@ -42,7 +42,7 @@ if (WINDOWS) iphlpapi libcmt) # as the webrtc libraries are release, build this binary as release as well. 
- target_compile_options(llwebrtc PRIVATE "/MT") + target_compile_options(llwebrtc PRIVATE "/MT" "/Zc:wchar_t") if (USE_BUGSPLAT) set_target_properties(llwebrtc PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}") endif (USE_BUGSPLAT) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index dff508c6a3d..509714e9d72 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -32,10 +32,12 @@ #include "api/audio_codecs/audio_encoder_factory.h" #include "api/audio_codecs/builtin_audio_decoder_factory.h" #include "api/audio_codecs/builtin_audio_encoder_factory.h" +#include "api/audio/builtin_audio_processing_builder.h" #include "api/media_stream_interface.h" #include "api/media_stream_track.h" #include "modules/audio_processing/audio_buffer.h" #include "modules/audio_mixer/audio_mixer_impl.h" +#include "api/environment/environment_factory.h" namespace llwebrtc { @@ -48,23 +50,54 @@ static int16_t RECORD_DEVICE_DEFAULT = 0; #endif -LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {} +// +// LLWebRTCAudioTransport implementation +// -float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; } +LLWebRTCAudioTransport::LLWebRTCAudioTransport() : mMicrophoneEnergy(0.0) +{ + memset(mSumVector, 0, sizeof(mSumVector)); +} -// TODO: Pull smoothing/filtering code into a common helper function -// for LLAudioDeviceObserver and LLCustomProcessor +void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t) +{ + engine_.store(t, std::memory_order_release); +} -void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, + size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clock_drift, + uint32_t current_mic_level, + bool key_pressed, + uint32_t& new_mic_level) { + auto* engine = engine_.load(std::memory_order_acquire); + + // 1) Deliver to engine (authoritative). 
+ int32_t ret = 0; + if (engine) + { + ret = engine->RecordedDataIsAvailable(audio_data, + number_of_samples, + bytes_per_sample, + number_of_channels, + samples_per_sec, + total_delay_ms, + clock_drift, + current_mic_level, + key_pressed, + new_mic_level); + } + + // 2) Calculate energy for microphone level monitoring // calculate the energy float energy = 0; - const short *samples = (const short *) audio_samples; - for (size_t index = 0; index < num_samples * num_channels; index++) + const short *samples = (const short *) audio_data; + for (size_t index = 0; index < number_of_samples * number_of_channels; index++) { float sample = (static_cast(samples[index]) / (float) 32767); energy += sample * sample; @@ -81,18 +114,59 @@ void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples, } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size)); + mMicrophoneEnergy = std::sqrt(totalSum / (number_of_samples * buffer_size)); + + return ret; } -void LLAudioDeviceObserver::OnRenderData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) +int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + void* audio_data, + size_t& number_of_samples_out, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) { + auto* engine = engine_.load(std::memory_order_acquire); + if (!engine) + { + // No engine sink; output silence to be safe. + const size_t bytes = number_of_samples * bytes_per_sample * number_of_channels; + memset(audio_data, 0, bytes); + number_of_samples_out = number_of_samples; + return 0; + } + + // Only the engine should fill the buffer. + return engine->NeedMorePlayData(number_of_samples, + bytes_per_sample, + number_of_channels, + samples_per_sec, + audio_data, + number_of_samples_out, + elapsed_time_ms, + ntp_time_ms); +} + +void LLWebRTCAudioTransport::PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) +{ + auto* engine = engine_.load(std::memory_order_acquire); + + if (engine) + { + engine + ->PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms); + } } -LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0) +LLCustomProcessor::LLCustomProcessor(LLCustomProcessorStatePtr state) : mSampleRateHz(0), mNumChannels(0), mState(state) { memset(mSumVector, 0, sizeof(mSumVector)); } @@ -129,10 +203,11 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) // calculate the energy float energy = 0; + float gain = mState->getGain(); for (size_t index = 0; index < stream_config.num_samples(); index++) { float sample = frame_samples[index]; - sample = sample * mGain; // apply gain + sample = sample * gain; // apply gain frame_samples[index] = sample; // write processed sample back to buffer. 
energy += sample * sample; } @@ -150,7 +225,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size)); + mState->setMicrophoneEnergy(std::sqrt(totalSum / (stream_config.num_samples() * buffer_size))); } // @@ -163,82 +238,48 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mMute(true), mTuningMode(false), mDevicesDeploying(false), - mPlayoutDevice(0), - mRecordingDevice(0), - mTuningAudioDeviceObserver(nullptr) + mPlayoutDevice(PLAYOUT_DEVICE_DEFAULT), + mRecordingDevice(RECORD_DEVICE_DEFAULT) { } void LLWebRTCImpl::init() { - mPlayoutDevice = 0; - mRecordingDevice = 0; - rtc::InitializeSSL(); + webrtc::InitializeSSL(); // Normal logging is rather spammy, so turn it off. - rtc::LogMessage::LogToDebug(rtc::LS_NONE); - rtc::LogMessage::SetLogToStderr(true); - rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE); + webrtc::LogMessage::LogToDebug(webrtc::LS_NONE); + webrtc::LogMessage::SetLogToStderr(true); + webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE); mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory(); // Create the native threads. - mNetworkThread = rtc::Thread::CreateWithSocketServer(); + mNetworkThread = webrtc::Thread::CreateWithSocketServer(); mNetworkThread->SetName("WebRTCNetworkThread", nullptr); mNetworkThread->Start(); - mWorkerThread = rtc::Thread::Create(); + mWorkerThread = webrtc::Thread::Create(); mWorkerThread->SetName("WebRTCWorkerThread", nullptr); mWorkerThread->Start(); - mSignalingThread = rtc::Thread::Create(); + mSignalingThread = webrtc::Thread::Create(); mSignalingThread->SetName("WebRTCSignalingThread", nullptr); mSignalingThread->Start(); - mTuningAudioDeviceObserver = new LLAudioDeviceObserver; - mWorkerThread->PostTask( - [this]() - { - // Initialize the audio devices on the Worker Thread - mTuningDeviceModule = - webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - std::unique_ptr(mTuningAudioDeviceObserver)); - - mTuningDeviceModule->Init(); - mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mTuningDeviceModule->SetRecordingDevice(mRecordingDevice); - mTuningDeviceModule->EnableBuiltInAEC(false); - mTuningDeviceModule->SetAudioDeviceSink(this); - mTuningDeviceModule->InitMicrophone(); - mTuningDeviceModule->InitSpeaker(); - mTuningDeviceModule->SetStereoRecording(false); - mTuningDeviceModule->SetStereoPlayout(true); - mTuningDeviceModule->InitRecording(); - mTuningDeviceModule->InitPlayout(); - updateDevices(); - }); - mWorkerThread->BlockingCall( [this]() { - // the peer device module doesn't need an observer - // as we pull peer data after audio processing. 
- mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, - mTaskQueueFactory.get(), - nullptr); - mPeerDeviceModule->Init(); - mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice); - mPeerDeviceModule->SetRecordingDevice(mRecordingDevice); - mPeerDeviceModule->EnableBuiltInAEC(false); - mPeerDeviceModule->InitMicrophone(); - mPeerDeviceModule->InitSpeaker(); + webrtc::scoped_refptr realADM = + webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, mTaskQueueFactory.get()); + mDeviceModule = webrtc::make_ref_counted(realADM); + mDeviceModule->SetObserver(this); }); // The custom processor allows us to retrieve audio data (and levels) // from after other audio processing such as AEC, AGC, etc. - mPeerCustomProcessor = new LLCustomProcessor; - webrtc::AudioProcessingBuilder apb; - apb.SetCapturePostProcessing(std::unique_ptr(mPeerCustomProcessor)); - mAudioProcessingModule = apb.Create(); + mPeerCustomProcessor = std::make_shared(); + webrtc::BuiltinAudioProcessingBuilder apb; + apb.SetCapturePostProcessing(std::make_unique(mPeerCustomProcessor)); + mAudioProcessingModule = apb.Build(webrtc::CreateEnvironment()); webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = false; @@ -270,13 +311,20 @@ void LLWebRTCImpl::init() mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(), mWorkerThread.get(), mSignalingThread.get(), - mPeerDeviceModule, + mDeviceModule, webrtc::CreateBuiltinAudioEncoderFactory(), webrtc::CreateBuiltinAudioDecoderFactory(), nullptr /* video_encoder_factory */, nullptr /* video_decoder_factory */, nullptr /* audio_mixer */, mAudioProcessingModule); + mWorkerThread->PostTask( + [this]() + { + mDeviceModule->EnableBuiltInAEC(false); + workerDeployDevices(); + updateDevices(); + }); } @@ -298,64 +346,16 @@ void LLWebRTCImpl::terminate() mWorkerThread->BlockingCall( [this]() { - if (mTuningDeviceModule) - { - mTuningDeviceModule->StopRecording(); - mTuningDeviceModule->Terminate(); - } - if (mPeerDeviceModule) - { - mPeerDeviceModule->StopRecording(); - mPeerDeviceModule->Terminate(); - } - mTuningDeviceModule = nullptr; - mPeerDeviceModule = nullptr; - mTaskQueueFactory = nullptr; - }); - rtc::LogMessage::RemoveLogToStream(mLogSink); -} - -// -// Devices functions -// -// Most device-related functionality needs to happen -// on the worker thread (the audio thread,) so those calls will be -// proxied over to that thread. 
-// -void LLWebRTCImpl::setRecording(bool recording) -{ - mWorkerThread->PostTask( - [this, recording]() - { - if (recording) - { - mPeerDeviceModule->SetStereoRecording(false); - mPeerDeviceModule->InitRecording(); - mPeerDeviceModule->StartRecording(); - } - else - { - mPeerDeviceModule->StopRecording(); - } - }); -} - -void LLWebRTCImpl::setPlayout(bool playing) -{ - mWorkerThread->PostTask( - [this, playing]() - { - if (playing) + if (mDeviceModule) { - mPeerDeviceModule->SetStereoPlayout(true); - mPeerDeviceModule->InitPlayout(); - mPeerDeviceModule->StartPlayout(); - } - else - { - mPeerDeviceModule->StopPlayout(); + mDeviceModule->StopRecording(); + mDeviceModule->StopPlayout(); + mDeviceModule->Terminate(); } + mDeviceModule = nullptr; + mTaskQueueFactory = nullptr; }); + webrtc::LogMessage::RemoveLogToStream(mLogSink); } void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) @@ -418,20 +418,55 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) } } -void ll_set_device_module_capture_device(rtc::scoped_refptr device_module, int16_t device) +// must be run in the worker thread. +void LLWebRTCImpl::workerDeployDevices() { - device_module->SetRecordingDevice(device); - device_module->InitMicrophone(); - device_module->SetStereoRecording(false); - device_module->InitRecording(); -} + mDeviceModule->StopPlayout(); + mDeviceModule->ForceStopRecording(); +#if WEBRTC_WIN + if (mRecordingDevice < 0) { + mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)mRecordingDevice); + } + else + { + mDeviceModule->SetRecordingDevice(mRecordingDevice); + } +#else + mDeviceModule->SetRecordingDevice(mRecordingDevice); +#endif + mDeviceModule->InitMicrophone(); + mDeviceModule->SetStereoRecording(false); + mDeviceModule->InitRecording(); -void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device) -{ - device_module->SetPlayoutDevice(device); - device_module->InitSpeaker(); - device_module->SetStereoPlayout(true); - device_module->InitPlayout(); +#if WEBRTC_WIN + if (mPlayoutDevice < 0) + { + mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)mPlayoutDevice); + } + else + { + mDeviceModule->SetPlayoutDevice(mPlayoutDevice); + } +#else + mDeviceModule->SetPlayoutDevice(mPlayoutDevice); +#endif + mDeviceModule->InitSpeaker(); + mDeviceModule->SetStereoPlayout(true); + mDeviceModule->InitPlayout(); + mDeviceModule->ForceStartRecording(); + uint32_t min_v = 0, max_v = 0, cur_v = 0; + bool have_hw = false; + + mDeviceModule->MicrophoneVolumeIsAvailable(&have_hw); + if (have_hw) + { + mDeviceModule->MinMicrophoneVolume(&min_v); + mDeviceModule->MaxMicrophoneVolume(&max_v); + uint32_t target = min_v + (max_v - min_v) * 8 / 10; // ~80% + mDeviceModule->SetMicrophoneVolume(target); + mDeviceModule->MicrophoneVolume(&cur_v); + } + mDeviceModule->StartPlayout(); } void LLWebRTCImpl::setCaptureDevice(const std::string &id) @@ -460,6 +495,9 @@ void LLWebRTCImpl::setCaptureDevice(const std::string &id) } } + // Always deploy devices, as we may have received a device update + // for the default device, which may be the same as mRecordingDevice + // but still needs to be refreshed. 
mRecordingDevice = recordingDevice; deployDevices(); } @@ -488,6 +526,10 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id) } } } + + // Always deploy devices, as we may have received a device update + // for the default device, which may be the same as mPlayoutDevice + // but still needs to be refreshed. mPlayoutDevice = playoutDevice; deployDevices(); } @@ -495,7 +537,7 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id) // updateDevices needs to happen on the worker thread. void LLWebRTCImpl::updateDevices() { - int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices(); + int16_t renderDeviceCount = mDeviceModule->PlayoutDevices(); mPlayoutDeviceList.clear(); #if WEBRTC_WIN @@ -509,11 +551,11 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->PlayoutDeviceName(index, name, guid); + mDeviceModule->PlayoutDeviceName(index, name, guid); mPlayoutDeviceList.emplace_back(name, guid); } - int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices(); + int16_t captureDeviceCount = mDeviceModule->RecordingDevices(); mRecordingDeviceList.clear(); #if WEBRTC_WIN @@ -527,7 +569,7 @@ void LLWebRTCImpl::updateDevices() { char name[webrtc::kAdmMaxDeviceNameSize]; char guid[webrtc::kAdmMaxGuidSize]; - mTuningDeviceModule->RecordingDeviceName(index, name, guid); + mDeviceModule->RecordingDeviceName(index, name, guid); mRecordingDeviceList.emplace_back(name, guid); } @@ -549,7 +591,28 @@ void LLWebRTCImpl::OnDevicesUpdated() void LLWebRTCImpl::setTuningMode(bool enable) { mTuningMode = enable; - deployDevices(); + mDeviceModule->SetTuning(mTuningMode); + mWorkerThread->PostTask( + [this] + { + mDeviceModule->SetTuning(mTuningMode); + mSignalingThread->PostTask( + [this] + { + for (auto& connection : mPeerConnections) + { + if (mTuningMode) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!mTuningMode); + } + }); + }); } void LLWebRTCImpl::deployDevices() @@ -561,32 +624,7 @@ void LLWebRTCImpl::deployDevices() mDevicesDeploying = true; mWorkerThread->PostTask( [this] { - if (mTuningMode) - { - mPeerDeviceModule->StopPlayout(); - mPeerDeviceModule->StopRecording(); - mTuningDeviceModule->StopPlayout(); - mTuningDeviceModule->StopRecording(); - ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice); - mTuningDeviceModule->StartRecording(); - // TODO: Starting Playout on the TDM appears to create an audio artifact (click) - // in this case, so disabling it for now. We may have to do something different - // if we enable 'echo playback' via the TDM when tuning. 
- //mTuningDeviceModule->InitPlayout(); - //mTuningDeviceModule->StartPlayout(); - } - else - { - mTuningDeviceModule->StopPlayout(); - mTuningDeviceModule->StopRecording(); - mPeerDeviceModule->StopPlayout(); - mPeerDeviceModule->StopRecording(); - ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice); - ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice); - mPeerDeviceModule->StartPlayout(); - mPeerDeviceModule->StartRecording(); - } + workerDeployDevices(); mSignalingThread->PostTask( [this] { @@ -607,11 +645,11 @@ void LLWebRTCImpl::deployDevices() }); } -float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); } +float LLWebRTCImpl::getTuningAudioLevel() { return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : 0.0f; } -float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); } +float LLWebRTCImpl::getPeerConnectionAudioLevel() { return mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) : 0.0f; } -void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); } +void LLWebRTCImpl::setPeerConnectionGain(float gain) { if (mPeerCustomProcessor) mPeerCustomProcessor->setGain(gain); } // @@ -620,35 +658,23 @@ void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->set LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() { - rtc::scoped_refptr peerConnection = rtc::scoped_refptr(new rtc::RefCountedObject()); + bool empty = mPeerConnections.empty(); + webrtc::scoped_refptr peerConnection = webrtc::scoped_refptr(new webrtc::RefCountedObject()); peerConnection->init(this); mPeerConnections.emplace_back(peerConnection); - // Should it really start disabled? 
- // Seems like something doesn't get the memo and senders need to be reset later - // to remove the voice indicator from taskbar peerConnection->enableSenderTracks(false); - if (mPeerConnections.empty()) - { - setRecording(true); - setPlayout(true); - } return peerConnection.get(); } void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection) { - std::vector>::iterator it = + std::vector>::iterator it = std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection); if (it != mPeerConnections.end()) { mPeerConnections.erase(it); } - if (mPeerConnections.empty()) - { - setRecording(false); - setPlayout(false); - } } @@ -707,7 +733,6 @@ void LLWebRTCPeerConnectionImpl::terminate() track->set_enabled(false); } } - mPeerConnection->SetAudioRecording(false); mPeerConnection->Close(); if (mLocalStream) @@ -794,7 +819,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mDataChannel->RegisterObserver(this); } - cricket::AudioOptions audioOptions; + webrtc::AudioOptions audioOptions; audioOptions.auto_gain_control = true; audioOptions.echo_cancellation = true; audioOptions.noise_suppression = true; @@ -802,7 +827,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream"); - rtc::scoped_refptr audio_track( + webrtc::scoped_refptr audio_track( mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get())); audio_track->set_enabled(false); mLocalStream->AddTrack(audio_track); @@ -816,7 +841,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -831,7 +856,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -858,7 +883,6 @@ void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable) // set_enabled shouldn't be done on the worker thread. if (mPeerConnection) { - mPeerConnection->SetAudioRecording(enable); auto senders = mPeerConnection->GetSenders(); for (auto &sender : senders) { @@ -892,7 +916,7 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state(); mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } }); } @@ -920,9 +944,6 @@ void LLWebRTCPeerConnectionImpl::setMute(bool mute) { if (mPeerConnection) { - // SetAudioRecording must be called before enabling/disabling tracks. - mPeerConnection->SetAudioRecording(enable); - auto senders = mPeerConnection->GetSenders(); RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size(); @@ -1002,14 +1023,14 @@ void LLWebRTCPeerConnectionImpl::setSendVolume(float volume) // PeerConnectionObserver implementation. 
// -void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr receiver, - const std::vector> &streams) +void LLWebRTCPeerConnectionImpl::OnAddTrack(webrtc::scoped_refptr receiver, + const std::vector> &streams) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); webrtc::RtpParameters params; webrtc::RtpCodecParameters codecparam; codecparam.name = "opus"; - codecparam.kind = cricket::MEDIA_TYPE_AUDIO; + codecparam.kind = webrtc::MediaType::AUDIO; codecparam.clock_rate = 48000; codecparam.num_channels = 2; codecparam.parameters["stereo"] = "1"; @@ -1018,12 +1039,12 @@ void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptrSetParameters(params); } -void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr receiver) +void LLWebRTCPeerConnectionImpl::OnRemoveTrack(webrtc::scoped_refptr receiver) { RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id(); } -void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr channel) +void LLWebRTCPeerConnectionImpl::OnDataChannel(webrtc::scoped_refptr channel) { if (mDataChannel) { @@ -1110,23 +1131,23 @@ static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterfa candidate->candidate().address().ipaddr().ToString() << " " << candidate->candidate().address().PortAsString() << " typ "; - if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE) + if (candidate->candidate().type() == webrtc::IceCandidateType::kHost) { candidate_stream << "host"; } - else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kSrflx) { candidate_stream << "srflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kRelay) { candidate_stream << "relay " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << "rport " << candidate->candidate().related_address().PortAsString(); } - else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE) + else if (candidate->candidate().type() == webrtc::IceCandidateType::kPrflx) { candidate_stream << "prflx " << "raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " << @@ -1221,7 +1242,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface * mPeerConnection->SetLocalDescription(std::unique_ptr( webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)), - rtc::scoped_refptr(this)); + webrtc::scoped_refptr(this)); } @@ -1331,7 +1352,7 @@ void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary) { if (mDataChannel) { - rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); + webrtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length()); webrtc::DataBuffer buffer(cowBuffer, binary); mWebRTCImpl->PostNetworkTask([this, buffer]() { if (mDataChannel) diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index f4841b9cbdf..b7ebc6821ca 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -54,12 +54,12 @@ #include "rtc_base/ref_counted_object.h" #include "rtc_base/ssl_adapter.h" #include "rtc_base/thread.h" +#include "rtc_base/logging.h" #include "api/peer_connection_interface.h" #include "api/media_stream_interface.h" #include "api/create_peerconnection_factory.h" #include 
"modules/audio_device/include/audio_device.h" #include "modules/audio_device/include/audio_device_data_observer.h" -#include "rtc_base/task_queue.h" #include "api/task_queue/task_queue_factory.h" #include "api/task_queue/default_task_queue_factory.h" #include "modules/audio_device/include/audio_device_defines.h" @@ -69,35 +69,30 @@ namespace llwebrtc class LLWebRTCPeerConnectionImpl; -class LLWebRTCLogSink : public rtc::LogSink { +class LLWebRTCLogSink : public webrtc::LogSink +{ public: - LLWebRTCLogSink(LLWebRTCLogCallback* callback) : - mCallback(callback) - { - } + LLWebRTCLogSink(LLWebRTCLogCallback* callback) : mCallback(callback) {} // Destructor: close the log file - ~LLWebRTCLogSink() override - { - } + ~LLWebRTCLogSink() override {} - void OnLogMessage(const std::string& msg, - rtc::LoggingSeverity severity) override + void OnLogMessage(const std::string& msg, webrtc::LoggingSeverity severity) override { if (mCallback) { - switch(severity) + switch (severity) { - case rtc::LS_VERBOSE: + case webrtc::LS_VERBOSE: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; - case rtc::LS_INFO: + case webrtc::LS_INFO: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; - case rtc::LS_WARNING: + case webrtc::LS_WARNING: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; - case rtc::LS_ERROR: + case webrtc::LS_ERROR: mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg); break; default: @@ -118,44 +113,250 @@ class LLWebRTCLogSink : public rtc::LogSink { LLWebRTCLogCallback* mCallback; }; -// Implements a class allowing capture of audio data -// to determine audio level of the microphone. -class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver +// ----------------------------------------------------------------------------- +// A proxy transport that forwards capture data to two AudioTransport sinks: +// - the "engine" (libwebrtc's VoiceEngine) +// - the "user" (your app's listener) +// +// Playout (NeedMorePlayData) goes only to the engine by default to avoid +// double-writing into the output buffer. See notes below if you want a tap. +// ----------------------------------------------------------------------------- +class LLWebRTCAudioTransport : public webrtc::AudioTransport { - public: - LLAudioDeviceObserver(); - - // Retrieve the RMS audio loudness - float getMicrophoneEnergy(); - - // Data retrieved from the caputure device is - // passed in here for processing. - void OnCaptureData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) override; - - // This is for data destined for the render device. - // not currently used. 
- void OnRenderData(const void *audio_samples, - const size_t num_samples, - const size_t bytes_per_sample, - const size_t num_channels, - const uint32_t samples_per_sec) override; +public: + LLWebRTCAudioTransport(); + + void SetEngineTransport(webrtc::AudioTransport* t); + + // -------- Capture path: fan out to both sinks -------- + int32_t RecordedDataIsAvailable(const void* audio_data, + size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + uint32_t total_delay_ms, + int32_t clock_drift, + uint32_t current_mic_level, + bool key_pressed, + uint32_t& new_mic_level) override; + + // -------- Playout path: delegate to engine only -------- + int32_t NeedMorePlayData(size_t number_of_samples, + size_t bytes_per_sample, + size_t number_of_channels, + uint32_t samples_per_sec, + void* audio_data, + size_t& number_of_samples_out, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override; + + // Method to pull mixed render audio data from all active VoE channels. + // The data will not be passed as reference for audio processing internally. + void PullRenderData(int bits_per_sample, + int sample_rate, + size_t number_of_channels, + size_t number_of_frames, + void* audio_data, + int64_t* elapsed_time_ms, + int64_t* ntp_time_ms) override; + + float GetMicrophoneEnergy() { return mMicrophoneEnergy; } - protected: - static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) - float mSumVector[NUM_PACKETS_TO_FILTER]; - float mMicrophoneEnergy; +private: + std::atomic engine_{ nullptr }; + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) + float mSumVector[NUM_PACKETS_TO_FILTER]; + float mMicrophoneEnergy; +}; + + +// ----------------------------------------------------------------------------- +// LLWebRTCAudioDeviceModule +// - Wraps a real ADM to provide microphone energy for tuning +// ----------------------------------------------------------------------------- +class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule +{ +public: + explicit LLWebRTCAudioDeviceModule(webrtc::scoped_refptr inner) : inner_(std::move(inner)), tuning_(false) + { + RTC_CHECK(inner_); + } + + // ----- AudioDeviceModule interface: we mostly forward to |inner_| ----- + int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override { return inner_->ActiveAudioLayer(audioLayer); } + + int32_t RegisterAudioCallback(webrtc::AudioTransport* engine_transport) override + { + // The engine registers its transport here. We put our audio transport between engine and ADM. + audio_transport_.SetEngineTransport(engine_transport); + // Register our proxy with the real ADM. 
+ return inner_->RegisterAudioCallback(&audio_transport_); + } + + int32_t Init() override { return inner_->Init(); } + int32_t Terminate() override { return inner_->Terminate(); } + bool Initialized() const override { return inner_->Initialized(); } + + // --- Device enumeration/selection (forward) --- + int16_t PlayoutDevices() override { return inner_->PlayoutDevices(); } + int16_t RecordingDevices() override { return inner_->RecordingDevices(); } + int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override + { + return inner_->PlayoutDeviceName(index, name, guid); + } + int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override + { + return inner_->RecordingDeviceName(index, name, guid); + } + int32_t SetPlayoutDevice(uint16_t index) override { return inner_->SetPlayoutDevice(index); } + int32_t SetRecordingDevice(uint16_t index) override { return inner_->SetRecordingDevice(index); } + + // Windows default/communications selectors, if your branch exposes them: + int32_t SetPlayoutDevice(WindowsDeviceType type) override { return inner_->SetPlayoutDevice(type); } + int32_t SetRecordingDevice(WindowsDeviceType type) override { return inner_->SetRecordingDevice(type); } + + // --- Init/start/stop (forward) --- + int32_t InitPlayout() override { return inner_->InitPlayout(); } + bool PlayoutIsInitialized() const override { return inner_->PlayoutIsInitialized(); } + int32_t StartPlayout() override { + if (tuning_) return 0; // For tuning, don't allow playout + return inner_->StartPlayout(); + } + int32_t StopPlayout() override { return inner_->StopPlayout(); } + bool Playing() const override { return inner_->Playing(); } + + int32_t InitRecording() override { return inner_->InitRecording(); } + bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); } + int32_t StartRecording() override { return inner_->StartRecording(); } + int32_t StopRecording() override { + if (tuning_) return 0; // if we're tuning, disregard the StopRecording we get from disabling the streams + return inner_->StopRecording(); + } + int32_t ForceStartRecording() { return inner_->StartRecording(); } + int32_t ForceStopRecording() { return inner_->StopRecording(); } + bool Recording() const override { return inner_->Recording(); } + + // --- Stereo opts (forward if available on your branch) --- + int32_t SetStereoPlayout(bool enable) override { return inner_->SetStereoPlayout(enable); } + int32_t SetStereoRecording(bool enable) override { return inner_->SetStereoRecording(enable); } + int32_t PlayoutIsAvailable(bool* available) override { return inner_->PlayoutIsAvailable(available); } + int32_t RecordingIsAvailable(bool* available) override { return inner_->RecordingIsAvailable(available); } + + // --- AGC/Volume/Mute/etc. 
(forward) --- + int32_t SetMicrophoneVolume(uint32_t volume) override { return inner_->SetMicrophoneVolume(volume); } + int32_t MicrophoneVolume(uint32_t* volume) const override { return inner_->MicrophoneVolume(volume); } + + // --- Speaker/Microphone init (forward) --- + int32_t InitSpeaker() override { return inner_->InitSpeaker(); } + bool SpeakerIsInitialized() const override { return inner_->SpeakerIsInitialized(); } + int32_t InitMicrophone() override { return inner_->InitMicrophone(); } + bool MicrophoneIsInitialized() const override { return inner_->MicrophoneIsInitialized(); } + + // --- Speaker Volume (forward) --- + int32_t SpeakerVolumeIsAvailable(bool* available) override { return inner_->SpeakerVolumeIsAvailable(available); } + int32_t SetSpeakerVolume(uint32_t volume) override { return inner_->SetSpeakerVolume(volume); } + int32_t SpeakerVolume(uint32_t* volume) const override { return inner_->SpeakerVolume(volume); } + int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return inner_->MaxSpeakerVolume(maxVolume); } + int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return inner_->MinSpeakerVolume(minVolume); } + + // --- Microphone Volume (forward) --- + int32_t MicrophoneVolumeIsAvailable(bool* available) override { return inner_->MicrophoneVolumeIsAvailable(available); } + int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return inner_->MaxMicrophoneVolume(maxVolume); } + int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return inner_->MinMicrophoneVolume(minVolume); } + + // --- Speaker Mute (forward) --- + int32_t SpeakerMuteIsAvailable(bool* available) override { return inner_->SpeakerMuteIsAvailable(available); } + int32_t SetSpeakerMute(bool enable) override { return inner_->SetSpeakerMute(enable); } + int32_t SpeakerMute(bool* enabled) const override { return inner_->SpeakerMute(enabled); } + + // --- Microphone Mute (forward) --- + int32_t MicrophoneMuteIsAvailable(bool* available) override { return inner_->MicrophoneMuteIsAvailable(available); } + int32_t SetMicrophoneMute(bool enable) override { return inner_->SetMicrophoneMute(enable); } + int32_t MicrophoneMute(bool* enabled) const override { return inner_->MicrophoneMute(enabled); } + + // --- Stereo Support (forward) --- + int32_t StereoPlayoutIsAvailable(bool* available) const override { return inner_->StereoPlayoutIsAvailable(available); } + int32_t StereoPlayout(bool* enabled) const override { return inner_->StereoPlayout(enabled); } + int32_t StereoRecordingIsAvailable(bool* available) const override { return inner_->StereoRecordingIsAvailable(available); } + int32_t StereoRecording(bool* enabled) const override { return inner_->StereoRecording(enabled); } + + // --- Delay/Timing (forward) --- + int32_t PlayoutDelay(uint16_t* delayMS) const override { return inner_->PlayoutDelay(delayMS); } + + // --- Built-in Audio Processing (forward) --- + bool BuiltInAECIsAvailable() const override { return inner_->BuiltInAECIsAvailable(); } + bool BuiltInAGCIsAvailable() const override { return inner_->BuiltInAGCIsAvailable(); } + bool BuiltInNSIsAvailable() const override { return inner_->BuiltInNSIsAvailable(); } + int32_t EnableBuiltInAEC(bool enable) override { return inner_->EnableBuiltInAEC(enable); } + int32_t EnableBuiltInAGC(bool enable) override { return inner_->EnableBuiltInAGC(enable); } + int32_t EnableBuiltInNS(bool enable) override { return inner_->EnableBuiltInNS(enable); } + + // --- Additional AudioDeviceModule methods (forward) --- + int32_t 
GetPlayoutUnderrunCount() const override { return inner_->GetPlayoutUnderrunCount(); } + + // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will + // not be present in the stats. + std::optional GetStats() const override { return inner_->GetStats(); } + +// Only supported on iOS. +#if defined(WEBRTC_IOS) + virtual int GetPlayoutAudioParameters(AudioParameters* params) const override { return inner_->GetPlayoutAudioParameters(params); } + virtual int GetRecordAudioParameters(AudioParameters* params) override { return inner_->GetRecordAudioParameters(params); } +#endif // WEBRTC_IOS + + virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); } + virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); } + + // tuning microphone energy calculations + float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); } + void SetTuning(bool tuning) + { + tuning_ = tuning; + inner_->InitRecording(); + inner_->StartRecording(); + if (tuning) + { + inner_->StopPlayout(); + } + else + { + inner_->StartPlayout(); + } + } + +protected: + ~LLWebRTCAudioDeviceModule() override = default; + +private: + webrtc::scoped_refptr inner_; + LLWebRTCAudioTransport audio_transport_; + + bool tuning_; }; +class LLCustomProcessorState +{ + +public: + float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); } + void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); } + + void setGain(float gain) { mGain.store(gain, std::memory_order_relaxed); } + float getGain() { return mGain.load(std::memory_order_relaxed); } + + protected: + std::atomic mMicrophoneEnergy{ 0.0f }; + std::atomic mGain{ 0.0f }; +}; + +using LLCustomProcessorStatePtr = std::shared_ptr; + // Used to process/retrieve audio levels after // all of the processing (AGC, AEC, etc.) for display in-world to the user. class LLCustomProcessor : public webrtc::CustomProcessing { public: - LLCustomProcessor(); + LLCustomProcessor(LLCustomProcessorStatePtr state); ~LLCustomProcessor() override {} // (Re-) Initializes the submodule. @@ -167,24 +368,20 @@ class LLCustomProcessor : public webrtc::CustomProcessing // Returns a string representation of the module state. std::string ToString() const override { return ""; } - float getMicrophoneEnergy() { return mMicrophoneEnergy; } - - void setGain(float gain) { mGain = gain; } - protected: static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing int mSampleRateHz; int mNumChannels; float mSumVector[NUM_PACKETS_TO_FILTER]; - float mMicrophoneEnergy; - float mGain; + friend LLCustomProcessorState; + LLCustomProcessorStatePtr mState; }; // Primary singleton implementation for interfacing // with the native webrtc library. 
-class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink +class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceObserver { public: LLWebRTCImpl(LLWebRTCLogCallback* logCallback); @@ -217,7 +414,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS void setPeerConnectionGain(float gain) override; // - // AudioDeviceSink + // AudioDeviceObserver // void OnDevicesUpdated() override; @@ -246,19 +443,19 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS mNetworkThread->PostTask(std::move(task), location); } - void WorkerBlockingCall(rtc::FunctionView functor, + void WorkerBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mWorkerThread->BlockingCall(std::move(functor), location); } - void SignalingBlockingCall(rtc::FunctionView functor, + void SignalingBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mSignalingThread->BlockingCall(std::move(functor), location); } - void NetworkBlockingCall(rtc::FunctionView functor, + void NetworkBlockingCall(webrtc::FunctionView functor, const webrtc::Location& location = webrtc::Location::Current()) { mNetworkThread->BlockingCall(std::move(functor), location); @@ -266,7 +463,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS // Allows the LLWebRTCPeerConnectionImpl class to retrieve the // native webrtc PeerConnectionFactory. - rtc::scoped_refptr getPeerConnectionFactory() + webrtc::scoped_refptr getPeerConnectionFactory() { return mPeerConnectionFactory; } @@ -275,23 +472,20 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS LLWebRTCPeerConnectionInterface* newPeerConnection(); void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection); - // enables/disables capture via the capture device - void setRecording(bool recording); - - void setPlayout(bool playing); - protected: + + void workerDeployDevices(); LLWebRTCLogSink* mLogSink; // The native webrtc threads - std::unique_ptr mNetworkThread; - std::unique_ptr mWorkerThread; - std::unique_ptr mSignalingThread; + std::unique_ptr mNetworkThread; + std::unique_ptr mWorkerThread; + std::unique_ptr mSignalingThread; // The factory that allows creation of native webrtc PeerConnections. - rtc::scoped_refptr mPeerConnectionFactory; + webrtc::scoped_refptr mPeerConnectionFactory; - rtc::scoped_refptr mAudioProcessingModule; + webrtc::scoped_refptr mAudioProcessingModule; // more native webrtc stuff std::unique_ptr mTaskQueueFactory; @@ -301,8 +495,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS void updateDevices(); void deployDevices(); bool mDevicesDeploying; - rtc::scoped_refptr mTuningDeviceModule; - rtc::scoped_refptr mPeerDeviceModule; + webrtc::scoped_refptr mDeviceModule; std::vector mVoiceDevicesObserverList; // accessors in native webrtc for devices aren't apparently implemented yet. 
@@ -315,11 +508,10 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS bool mMute; - LLAudioDeviceObserver * mTuningAudioDeviceObserver; - LLCustomProcessor * mPeerCustomProcessor; + LLCustomProcessorStatePtr mPeerCustomProcessor; // peer connections - std::vector> mPeerConnections; + std::vector> mPeerConnections; }; @@ -344,7 +536,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, void terminate(); virtual void AddRef() const override = 0; - virtual rtc::RefCountReleaseStatus Release() const override = 0; + virtual webrtc::RefCountReleaseStatus Release() const override = 0; // // LLWebRTCPeerConnection @@ -375,10 +567,10 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, // void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {} - void OnAddTrack(rtc::scoped_refptr receiver, - const std::vector> &streams) override; - void OnRemoveTrack(rtc::scoped_refptr receiver) override; - void OnDataChannel(rtc::scoped_refptr channel) override; + void OnAddTrack(webrtc::scoped_refptr receiver, + const std::vector> &streams) override; + void OnRemoveTrack(webrtc::scoped_refptr receiver) override; + void OnDataChannel(webrtc::scoped_refptr channel) override; void OnRenegotiationNeeded() override {} void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {}; void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override; @@ -417,7 +609,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, LLWebRTCImpl * mWebRTCImpl; - rtc::scoped_refptr mPeerConnectionFactory; + webrtc::scoped_refptr mPeerConnectionFactory; typedef enum { MUTE_INITIAL, @@ -431,12 +623,12 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface, std::vector> mCachedIceCandidates; bool mAnswerReceived; - rtc::scoped_refptr mPeerConnection; - rtc::scoped_refptr mLocalStream; + webrtc::scoped_refptr mPeerConnection; + webrtc::scoped_refptr mLocalStream; // data std::vector mDataObserverList; - rtc::scoped_refptr mDataChannel; + webrtc::scoped_refptr mDataChannel; }; } From 6ae2182e42b06b7024debdf4003ff4207a9dedca Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Sun, 31 Aug 2025 20:27:03 -0700 Subject: [PATCH 04/14] Properly pass the observer setting into the inner audio device module --- indra/llwebrtc/llwebrtc_impl.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index b7ebc6821ca..de618a0912c 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -306,6 +306,8 @@ class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); } virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); } + virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); } + // tuning microphone energy calculations float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); } From 94cfc21889edb7416c6e1db1b833411a29e95409 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Mon, 8 Sep 2025 14:55:49 -0700 Subject: [PATCH 05/14] Update to m137 and get rid of some noise This change updates to m137 from m114, which required a few API changes. 
Additionally, this fixes the hiss that happens shortly after someone unmutes: https://github.com/secondlife/server/issues/2094 There was also an issue with a slight amount of repeated audio after unmuting if there was audio right before unmuting. This is because the audio processing and buffering still had audio from the previous speaking session. Now, we inject nearly a half second of silence into the audio buffers/processor after unmuting to flush things. --- indra/llwebrtc/llwebrtc.cpp | 115 +++++++++++++++++++++++++++----- indra/llwebrtc/llwebrtc.h | 2 + indra/llwebrtc/llwebrtc_impl.h | 49 +++++++++++--- indra/newview/llvoicewebrtc.cpp | 15 ++++- 4 files changed, 152 insertions(+), 29 deletions(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 509714e9d72..586d9105af3 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -9,7 +9,7 @@ * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; - * version 2.1 of the License only. + * version 2.1 of the License only * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -178,7 +178,7 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels) memset(mSumVector, 0, sizeof(mSumVector)); } -void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) +void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) { webrtc::StreamConfig stream_config; stream_config.set_sample_rate_hz(mSampleRateHz); @@ -186,7 +186,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) std::vector frame; std::vector frame_samples; - if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480) + if (audio->num_channels() < 1 || audio->num_frames() < 480) { return; } @@ -199,20 +199,57 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in) frame[ch] = &(frame_samples)[ch * stream_config.num_frames()]; } - audio_in->CopyTo(stream_config, &frame[0]); + audio->CopyTo(stream_config, &frame[0]); // calculate the energy - float energy = 0; - float gain = mState->getGain(); - for (size_t index = 0; index < stream_config.num_samples(); index++) + + float desired_gain = mState->getGain(); + if (mState->getDirty()) { - float sample = frame_samples[index]; - sample = sample * gain; // apply gain - frame_samples[index] = sample; // write processed sample back to buffer. - energy += sample * sample; + // We'll delay ramping by 30ms in order to clear out buffers that may + // have had content before muting. And for the last 20ms, we'll ramp + // down or up smoothly. + mRampFrames = 5; + + // we've changed our desired gain, so set the incremental + // gain change so that we smoothly step over 20ms + mGainStep = (desired_gain - mCurrentGain) / (mSampleRateHz / 50); + } + + if (mRampFrames) + { + if (mRampFrames-- > 2) + { + // don't change the gain if we're still in the 'don't move' phase + mGainStep = 0.0f; + } + } + else + { + // We've ramped all the way down, so don't step the gain any more and + // just maintain the current gain.
+ mGainStep = 0.0f; + mCurrentGain = desired_gain; } - audio_in->CopyFrom(&frame[0], stream_config); + float energy = 0; + + float gain = mCurrentGain; + for (size_t index = 0; index < stream_config.num_samples() / stream_config.num_channels(); index++) + { + for (size_t ch = 0; ch < stream_config.num_channels(); ch++) + { + size_t sample_index = index * stream_config.num_channels() + ch; + float sample = frame_samples[sample_index]; + sample = sample * gain; // apply gain + frame_samples[sample_index] = sample; // write processed sample back to buffer. + energy += sample * sample; + } + gain = gain + mGainStep; // adjust gain + } + mCurrentGain = gain; + + audio->CopyFrom(&frame[0], stream_config); // smooth it. size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); @@ -239,7 +276,8 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mTuningMode(false), mDevicesDeploying(false), mPlayoutDevice(PLAYOUT_DEVICE_DEFAULT), - mRecordingDevice(RECORD_DEVICE_DEFAULT) + mRecordingDevice(RECORD_DEVICE_DEFAULT), + mGain(0.0f) { } @@ -453,7 +491,11 @@ void LLWebRTCImpl::workerDeployDevices() mDeviceModule->InitSpeaker(); mDeviceModule->SetStereoPlayout(true); mDeviceModule->InitPlayout(); - mDeviceModule->ForceStartRecording(); + + if (!mMute || mTuningMode) + { + mDeviceModule->ForceStartRecording(); + } uint32_t min_v = 0, max_v = 0, cur_v = 0; bool have_hw = false; @@ -591,11 +633,10 @@ void LLWebRTCImpl::OnDevicesUpdated() void LLWebRTCImpl::setTuningMode(bool enable) { mTuningMode = enable; - mDeviceModule->SetTuning(mTuningMode); mWorkerThread->PostTask( [this] { - mDeviceModule->SetTuning(mTuningMode); + mDeviceModule->SetTuning(mTuningMode, mMute); mSignalingThread->PostTask( [this] { @@ -649,8 +690,47 @@ float LLWebRTCImpl::getTuningAudioLevel() { return mDeviceModule ? -20 * log10f( float LLWebRTCImpl::getPeerConnectionAudioLevel() { return mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) : 0.0f; } -void LLWebRTCImpl::setPeerConnectionGain(float gain) { if (mPeerCustomProcessor) mPeerCustomProcessor->setGain(gain); } +void LLWebRTCImpl::setPeerConnectionGain(float gain) +{ + mGain = gain; + if (mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(gain); + } +} +void LLWebRTCImpl::setMute(bool mute, int delay_ms) +{ + mMute = mute; + if (mPeerCustomProcessor) + { + mPeerCustomProcessor->setGain(mMute ? 
0.0f : mGain); + } + if (mMute) + { + mWorkerThread->PostDelayedTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->ForceStopRecording(); + } + }, + webrtc::TimeDelta::Millis(delay_ms)); + } + else + { + mWorkerThread->PostTask( + [this] + { + if (mDeviceModule) + { + mDeviceModule->InitRecording(); + mDeviceModule->ForceStartRecording(); + } + }); + } +} // // Peer Connection Helpers @@ -939,6 +1019,7 @@ void LLWebRTCPeerConnectionImpl::setMute(bool mute) bool enable = !mute; mMute = new_state; + mWebRTCImpl->PostSignalingTask( [this, force_reset, enable]() { diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h index c6fdb909ddc..0296826ec90 100644 --- a/indra/llwebrtc/llwebrtc.h +++ b/indra/llwebrtc/llwebrtc.h @@ -160,6 +160,8 @@ class LLWebRTCDeviceInterface virtual float getTuningAudioLevel() = 0; // for use during tuning virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning virtual void setPeerConnectionGain(float gain) = 0; + + virtual void setMute(bool mute, int delay_ms = 0) = 0; }; // LLWebRTCAudioInterface provides the viewer with a way diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index de618a0912c..4a8697da703 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -227,7 +227,11 @@ class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule int32_t InitRecording() override { return inner_->InitRecording(); } bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); } - int32_t StartRecording() override { return inner_->StartRecording(); } + int32_t StartRecording() override { + if (tuning_) + return 0; // For tuning, we'll force a Start when we're ready + return inner_->StartRecording(); + } int32_t StopRecording() override { if (tuning_) return 0; // if we're tuning, disregard the StopRecording we get from disabling the streams return inner_->StopRecording(); @@ -311,17 +315,26 @@ class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule // tuning microphone energy calculations float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); } - void SetTuning(bool tuning) + void SetTuning(bool tuning, bool mute) { tuning_ = tuning; - inner_->InitRecording(); - inner_->StartRecording(); if (tuning) { + inner_->InitRecording(); + inner_->StartRecording(); inner_->StopPlayout(); } else { + if (mute) + { + inner_->StopRecording(); + } + else + { + inner_->InitRecording(); + inner_->StartRecording(); + } inner_->StartPlayout(); } } @@ -343,10 +356,18 @@ class LLCustomProcessorState float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); } void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); } - void setGain(float gain) { mGain.store(gain, std::memory_order_relaxed); } + void setGain(float gain) + { + mGain.store(gain, std::memory_order_relaxed); + mDirty.store(true, std::memory_order_relaxed); + } + float getGain() { return mGain.load(std::memory_order_relaxed); } + bool getDirty() { return mDirty.exchange(false, std::memory_order_relaxed); } + protected: + std::atomic mDirty{ true }; std::atomic mMicrophoneEnergy{ 0.0f }; std::atomic mGain{ 0.0f }; }; @@ -357,7 +378,7 @@ using LLCustomProcessorStatePtr = std::shared_ptr; // all of the processing (AGC, AEC, etc.) for display in-world to the user. 
class LLCustomProcessor : public webrtc::CustomProcessing { - public: +public: LLCustomProcessor(LLCustomProcessorStatePtr state); ~LLCustomProcessor() override {} @@ -365,15 +386,18 @@ class LLCustomProcessor : public webrtc::CustomProcessing void Initialize(int sample_rate_hz, int num_channels) override; // Analyzes the given capture or render signal. - void Process(webrtc::AudioBuffer *audio) override; + void Process(webrtc::AudioBuffer* audio) override; // Returns a string representation of the module state. std::string ToString() const override { return ""; } - protected: - static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing - int mSampleRateHz; - int mNumChannels; +protected: + static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing + int mSampleRateHz{ 48000 }; + int mNumChannels{ 2 }; + int mRampFrames{ 2 }; + float mCurrentGain{ 0.0f }; + float mGainStep{ 0.0f }; float mSumVector[NUM_PACKETS_TO_FILTER]; friend LLCustomProcessorState; @@ -415,6 +439,8 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO void setPeerConnectionGain(float gain) override; + void setMute(bool mute, int delay_ms = 20) override; + // // AudioDeviceObserver // @@ -509,6 +535,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO LLWebRTCVoiceDeviceList mPlayoutDeviceList; bool mMute; + float mGain; LLCustomProcessorStatePtr mPeerCustomProcessor; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index 6e00a2dfb44..4b991406718 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -85,6 +85,9 @@ namespace { const F32 MAX_AUDIO_DIST = 50.0f; const F32 VOLUME_SCALE_WEBRTC = 0.01f; const F32 LEVEL_SCALE_WEBRTC = 0.008f; + const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport + const uint32_t MUTE_FADE_DELAY_MS = 500; // 20ms fade followed by 480ms silence gets rid of the click just after unmuting. + // This is because the buffers and processing is cleared by the silence. const F32 SPEAKING_AUDIO_LEVEL = 0.30; @@ -841,6 +844,11 @@ void LLWebRTCVoiceClient::setHidden(bool hidden) if (inSpatialChannel()) { + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMute(mHidden || mMuteMic, + mHidden ? 0 : SET_HIDDEN_RESTORE_DELAY_MS); // delay 200ms so as to not pile up mutes/unmutes. + } if (mHidden) { // get out of the channel entirely @@ -1007,7 +1015,6 @@ void LLWebRTCVoiceClient::updatePosition(void) { if (participant->mRegion != region->getRegionID()) { participant->mRegion = region->getRegionID(); - setMuteMic(mMuteMic); } } } @@ -1535,6 +1542,12 @@ void LLWebRTCVoiceClient::setMuteMic(bool muted) } mMuteMic = muted; + + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMute(muted, muted ? MUTE_FADE_DELAY_MS : 0); // delay for 40ms on mute to allow buffers to empty + } + // when you're hidden, your mic is always muted. 
if (!mHidden) { From 0f2ca299f30e6b811796ecfd239b9759ce9989d1 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Mon, 8 Sep 2025 20:53:45 -0700 Subject: [PATCH 06/14] Install nsis on windows --- .github/workflows/build.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8689f2308ec..9b224b33db0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -93,7 +93,6 @@ jobs: uses: actions/setup-python@v5 with: python-version: "3.11" - - name: Checkout build variables uses: actions/checkout@v4 with: @@ -243,6 +242,11 @@ jobs: fi export PYTHON_COMMAND_NATIVE="$(native_path "$PYTHON_COMMAND")" + # make sure nsis is installed + if [[ "$RUNNER_OS" == "Windows" ]]; then + choco install nsis -y + fi + ./build.sh # Each artifact is downloaded as a distinct .zip file. Multiple jobs From 71ecc8fd89d0ff892c9c54bdd2ec466cafb7cf8f Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Mon, 8 Sep 2025 22:09:21 -0700 Subject: [PATCH 07/14] Use the newer digital AGC pipeline m137 improved the AGC pipeline and the existing analog style is going away so move to the new digital pipeline. Also, some tweaking for audio levels so that we don't see inworld bars when tuning, so one's own bars seem a reasonable size, etc. --- indra/llwebrtc/llwebrtc.cpp | 25 ++++++++++++++++--------- indra/llwebrtc/llwebrtc.h | 2 +- indra/llwebrtc/llwebrtc_impl.h | 2 +- indra/newview/llvoicewebrtc.cpp | 23 +++-------------------- indra/newview/llvoicewebrtc.h | 5 ----- 5 files changed, 21 insertions(+), 36 deletions(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 586d9105af3..996cdafa34e 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -323,8 +323,7 @@ void LLWebRTCImpl::init() apm_config.echo_canceller.enabled = false; apm_config.echo_canceller.mobile_mode = false; apm_config.gain_controller1.enabled = false; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; + apm_config.gain_controller2.enabled = true; apm_config.high_pass_filter.enabled = true; apm_config.noise_suppression.enabled = true; apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh; @@ -401,9 +400,9 @@ void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config) webrtc::AudioProcessing::Config apm_config; apm_config.echo_canceller.enabled = config.mEchoCancellation; apm_config.echo_canceller.mobile_mode = false; - apm_config.gain_controller1.enabled = config.mAGC; - apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog; - apm_config.gain_controller2.enabled = false; + apm_config.gain_controller1.enabled = false; + apm_config.gain_controller2.enabled = config.mAGC; + apm_config.gain_controller2.adaptive_digital.enabled = true; // auto-level speech apm_config.high_pass_filter.enabled = true; apm_config.transient_suppression.enabled = true; apm_config.pipeline.multi_channel_render = true; @@ -686,14 +685,22 @@ void LLWebRTCImpl::deployDevices() }); } -float LLWebRTCImpl::getTuningAudioLevel() { return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : 0.0f; } +float LLWebRTCImpl::getTuningAudioLevel() +{ + return mDeviceModule ? 
-20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits::infinity(); +} -float LLWebRTCImpl::getPeerConnectionAudioLevel() { return mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) : 0.0f; } +float LLWebRTCImpl::getPeerConnectionAudioLevel() +{ + return mTuningMode ? std::numeric_limits::infinity() + : (mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()) + : std::numeric_limits::infinity()); +} -void LLWebRTCImpl::setPeerConnectionGain(float gain) +void LLWebRTCImpl::setMicGain(float gain) { mGain = gain; - if (mPeerCustomProcessor) + if (!mTuningMode && mPeerCustomProcessor) { mPeerCustomProcessor->setGain(gain); } diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h index 0296826ec90..e5a67b0ca9f 100644 --- a/indra/llwebrtc/llwebrtc.h +++ b/indra/llwebrtc/llwebrtc.h @@ -159,7 +159,7 @@ class LLWebRTCDeviceInterface virtual void setTuningMode(bool enable) = 0; virtual float getTuningAudioLevel() = 0; // for use during tuning virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning - virtual void setPeerConnectionGain(float gain) = 0; + virtual void setMicGain(float gain) = 0; virtual void setMute(bool mute, int delay_ms = 0) = 0; }; diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index 4a8697da703..df02baab52f 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -437,7 +437,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO float getTuningAudioLevel() override; float getPeerConnectionAudioLevel() override; - void setPeerConnectionGain(float gain) override; + void setMicGain(float gain) override; void setMute(bool mute, int delay_ms = 20) override; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index 4b991406718..043e7b59654 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -204,7 +204,6 @@ bool LLWebRTCVoiceClient::sShuttingDown = false; LLWebRTCVoiceClient::LLWebRTCVoiceClient() : mHidden(false), - mTuningMode(false), mTuningMicGain(0.0), mTuningSpeakerVolume(50), // Set to 50 so the user can hear themselves when he sets his mic volume mDevicesListUpdated(false), @@ -794,21 +793,9 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) } } -float LLWebRTCVoiceClient::getAudioLevel() -{ - if (mIsInTuningMode) - { - return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f; - } - else - { - return (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) * mMicGain / 2.1f; - } -} - float LLWebRTCVoiceClient::tuningGetEnergy(void) { - return getAudioLevel(); + return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f; } bool LLWebRTCVoiceClient::deviceSettingsAvailable() @@ -1140,11 +1127,7 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force) // in the UI. This is done on all sessions, so switching // sessions retains consistent volume levels. 
void LLWebRTCVoiceClient::updateOwnVolume() { - F32 audio_level = 0.0; - if (!mMuteMic && !mTuningMode) - { - audio_level = getAudioLevel(); - } + F32 audio_level = (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) / 2.1f; sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level)); } @@ -1586,7 +1569,7 @@ void LLWebRTCVoiceClient::setMicGain(F32 gain) if (gain != mMicGain) { mMicGain = gain; - mWebRTCDeviceInterface->setPeerConnectionGain(gain); + mWebRTCDeviceInterface->setMicGain(gain); } } diff --git a/indra/newview/llvoicewebrtc.h b/indra/newview/llvoicewebrtc.h index 71347f206a5..722d81fdc2b 100644 --- a/indra/newview/llvoicewebrtc.h +++ b/indra/newview/llvoicewebrtc.h @@ -444,10 +444,6 @@ class LLWebRTCVoiceClient : public LLSingleton, private: - // helper function to retrieve the audio level - // Used in multiple places. - float getAudioLevel(); - // Coroutine support methods //--- void voiceConnectionCoro(); @@ -458,7 +454,6 @@ class LLWebRTCVoiceClient : public LLSingleton, LL::WorkQueue::weak_t mMainQueue; - bool mTuningMode; F32 mTuningMicGain; int mTuningSpeakerVolume; bool mDevicesListUpdated; // set to true when the device list has been updated From 1312a75038163c905b9ea5d8bdc14767a688b0cf Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Mon, 8 Sep 2025 22:22:25 -0700 Subject: [PATCH 08/14] Install NSIS during Windows signing and package build step --- .github/workflows/build.yaml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9b224b33db0..41c087c9d17 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -242,11 +242,6 @@ jobs: fi export PYTHON_COMMAND_NATIVE="$(native_path "$PYTHON_COMMAND")" - # make sure nsis is installed - if [[ "$RUNNER_OS" == "Windows" ]]; then - choco install nsis -y - fi - ./build.sh # Each artifact is downloaded as a distinct .zip file.
Multiple jobs @@ -312,6 +307,10 @@ jobs: needs: build runs-on: windows-2022 steps: + - name: Install NSIS + run: choco install nsis -y + shell: powershell + - name: Sign and package Windows viewer if: env.AZURE_KEY_VAULT_URI && env.AZURE_CERT_NAME && env.AZURE_CLIENT_ID && env.AZURE_CLIENT_SECRET && env.AZURE_TENANT_ID uses: secondlife/viewer-build-util/sign-pkg-windows@v2 From 7fabc9e0c4bb50bd897073ca4bacae5d65260048 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Tue, 9 Sep 2025 12:46:29 -0700 Subject: [PATCH 09/14] Try pinning the packaging to windows 2022 to deal with missing nsis --- .github/workflows/build.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 41c087c9d17..4d2b9755042 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -307,10 +307,6 @@ jobs: needs: build runs-on: windows-2022 steps: - - name: Install NSIS - run: choco install nsis -y - shell: powershell - - name: Sign and package Windows viewer if: env.AZURE_KEY_VAULT_URI && env.AZURE_CERT_NAME && env.AZURE_CLIENT_ID && env.AZURE_CLIENT_SECRET && env.AZURE_TENANT_ID uses: secondlife/viewer-build-util/sign-pkg-windows@v2 From 65aea52f8278ad3b8ad40fcf1373106750e07a32 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Wed, 10 Sep 2025 14:32:54 -0700 Subject: [PATCH 10/14] Adjust gain calculation and audio level calculations for tuning and peer connections --- autobuild.xml | 14 +++--- indra/llwebrtc/llwebrtc.cpp | 76 +++++++++++++++------------------ indra/llwebrtc/llwebrtc.h | 1 + indra/llwebrtc/llwebrtc_impl.h | 10 +++-- indra/newview/llvoicewebrtc.cpp | 20 ++++++--- 5 files changed, 65 insertions(+), 56 deletions(-) diff --git a/autobuild.xml b/autobuild.xml index 6cd8b3e60a5..67db627962e 100644 --- a/autobuild.xml +++ b/autobuild.xml @@ -2717,11 +2717,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - e41e3a4e9e07bcbf553e725eb468ba1c1943abfa + 8c4d1c363da56bf47178831ac9e03560e5a7e50a hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-darwin64-17354044714.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-darwin64-17623092396.tar.zst name darwin64 @@ -2731,11 +2731,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 586254d0c87cdaf9bb379ad36f161d42c499cfb3 + 995e116e180ff936bef7d40bcd60c845a299d414 hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-linux64-17354044714.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-linux64-17623092396.tar.zst name linux64 @@ -2745,11 +2745,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 3159f7e003c98a6ba3e286bb5f716a2127fe9843 + 08abe16a6735ab2eabc80d082f51098d6b87af19 hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.9/webrtc-m137.7151.04.9.17354044714-windows64-17354044714.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-windows64-17623092396.tar.zst name windows64 @@ -2762,7 +2762,7 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors copyright Copyright (c) 2011, The WebRTC project authors. All rights reserved. 
version - m137.7151.04.9.17354044714 + m137.7151.04.15.17623092396 name webrtc vcs_branch diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 996cdafa34e..6777b2458b1 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -65,8 +65,8 @@ void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t) } int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, - size_t number_of_samples, - size_t bytes_per_sample, + size_t number_of_frames, + size_t bytes_per_frame, size_t number_of_channels, uint32_t samples_per_sec, uint32_t total_delay_ms, @@ -82,8 +82,8 @@ int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, if (engine) { ret = engine->RecordedDataIsAvailable(audio_data, - number_of_samples, - bytes_per_sample, + number_of_frames, + bytes_per_frame, number_of_channels, samples_per_sec, total_delay_ms, @@ -97,12 +97,14 @@ int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, // calculate the energy float energy = 0; const short *samples = (const short *) audio_data; - for (size_t index = 0; index < number_of_samples * number_of_channels; index++) + + for (size_t index = 0; index < number_of_frames * number_of_channels; index++) { float sample = (static_cast(samples[index]) / (float) 32767); energy += sample * sample; } - + float gain = mGain.load(std::memory_order_relaxed); + energy = energy * gain * gain; // smooth it. size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); float totalSum = 0; @@ -114,13 +116,13 @@ int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data, } mSumVector[i] = energy; totalSum += energy; - mMicrophoneEnergy = std::sqrt(totalSum / (number_of_samples * buffer_size)); + mMicrophoneEnergy = std::sqrt(totalSum / (number_of_frames * number_of_channels * buffer_size)); return ret; } -int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_samples, - size_t bytes_per_sample, +int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_frames, + size_t bytes_per_frame, size_t number_of_channels, uint32_t samples_per_sec, void* audio_data, @@ -132,15 +134,15 @@ int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_samples, if (!engine) { // No engine sink; output silence to be safe. - const size_t bytes = number_of_samples * bytes_per_sample * number_of_channels; + const size_t bytes = number_of_frames * bytes_per_frame * number_of_channels; memset(audio_data, 0, bytes); - number_of_samples_out = number_of_samples; + number_of_samples_out = bytes_per_frame; return 0; } // Only the engine should fill the buffer. 
- return engine->NeedMorePlayData(number_of_samples, - bytes_per_sample, + return engine->NeedMorePlayData(number_of_frames, + bytes_per_frame, number_of_channels, samples_per_sec, audio_data, @@ -180,27 +182,11 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels) void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) { - webrtc::StreamConfig stream_config; - stream_config.set_sample_rate_hz(mSampleRateHz); - stream_config.set_num_channels(mNumChannels); - std::vector frame; - std::vector frame_samples; - if (audio->num_channels() < 1 || audio->num_frames() < 480) { return; } - // grab the input audio - frame_samples.resize(stream_config.num_samples()); - frame.resize(stream_config.num_channels()); - for (size_t ch = 0; ch < stream_config.num_channels(); ++ch) - { - frame[ch] = &(frame_samples)[ch * stream_config.num_frames()]; - } - - audio->CopyTo(stream_config, &frame[0]); - // calculate the energy float desired_gain = mState->getGain(); @@ -234,22 +220,21 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) float energy = 0; - float gain = mCurrentGain; - for (size_t index = 0; index < stream_config.num_samples() / stream_config.num_channels(); index++) + auto chans = audio->channels(); + for (size_t ch = 0; ch < audio->num_channels(); ch++) { - for (size_t ch = 0; ch < stream_config.num_channels(); ch++) + float* frame_samples = chans[ch]; + float gain = mCurrentGain; + for (size_t index = 0; index < audio->num_frames(); index++) { - size_t sample_index = index * stream_config.num_channels() + ch; - float sample = frame_samples[sample_index]; + float sample = frame_samples[index]; sample = sample * gain; // apply gain - frame_samples[sample_index] = sample; // write processed sample back to buffer. + frame_samples[index] = sample; // write processed sample back to buffer. energy += sample * sample; + gain += mGainStep; } - gain = gain + mGainStep; // adjust gain } - mCurrentGain = gain; - - audio->CopyFrom(&frame[0], stream_config); + mCurrentGain += audio->num_frames() * mGainStep; // smooth it. size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]); @@ -262,7 +247,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio) } mSumVector[i] = energy; totalSum += energy; - mState->setMicrophoneEnergy(std::sqrt(totalSum / (stream_config.num_samples() * buffer_size))); + mState->setMicrophoneEnergy(std::sqrt(totalSum / (audio->num_channels() * audio->num_frames() * buffer_size))); } // @@ -334,6 +319,7 @@ void LLWebRTCImpl::init() mAudioProcessingModule->ApplyConfig(apm_config); webrtc::ProcessingConfig processing_config; + processing_config.input_stream().set_num_channels(2); processing_config.input_stream().set_sample_rate_hz(48000); processing_config.output_stream().set_num_channels(2); @@ -690,6 +676,14 @@ float LLWebRTCImpl::getTuningAudioLevel() return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits::infinity(); } +void LLWebRTCImpl::setTuningMicGain(float gain) +{ + if (mTuningMode && mDeviceModule) + { + mDeviceModule->SetTuningMicGain(gain); + } +} + float LLWebRTCImpl::getPeerConnectionAudioLevel() { return mTuningMode ? 
std::numeric_limits::infinity() @@ -796,7 +790,7 @@ void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl) } void LLWebRTCPeerConnectionImpl::terminate() { - mWebRTCImpl->SignalingBlockingCall( + mWebRTCImpl->PostSignalingTask( [this]() { if (mPeerConnection) diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h index e5a67b0ca9f..7d06b7d2b40 100644 --- a/indra/llwebrtc/llwebrtc.h +++ b/indra/llwebrtc/llwebrtc.h @@ -160,6 +160,7 @@ class LLWebRTCDeviceInterface virtual float getTuningAudioLevel() = 0; // for use during tuning virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning virtual void setMicGain(float gain) = 0; + virtual void setTuningMicGain(float gain) = 0; virtual void setMute(bool mute, int delay_ms = 0) = 0; }; diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index df02baab52f..4420ba14f84 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -160,13 +160,16 @@ class LLWebRTCAudioTransport : public webrtc::AudioTransport int64_t* elapsed_time_ms, int64_t* ntp_time_ms) override; - float GetMicrophoneEnergy() { return mMicrophoneEnergy; } + float GetMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); } + void SetGain(float gain) { mGain.store(gain, std::memory_order_relaxed); } private: std::atomic engine_{ nullptr }; static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames) float mSumVector[NUM_PACKETS_TO_FILTER]; - float mMicrophoneEnergy; + std::atomic mMicrophoneEnergy; + std::atomic mGain{ 0.0f }; + }; @@ -312,9 +315,9 @@ class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); } virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); } - // tuning microphone energy calculations float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); } + void SetTuningMicGain(float gain) { audio_transport_.SetGain(gain); } void SetTuning(bool tuning, bool mute) { tuning_ = tuning; @@ -438,6 +441,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO float getPeerConnectionAudioLevel() override; void setMicGain(float gain) override; + void setTuningMicGain(float gain) override; void setMute(bool mute, int delay_ms = 20) override; diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index 043e7b59654..38f0fadd084 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -84,7 +84,7 @@ namespace { const F32 MAX_AUDIO_DIST = 50.0f; const F32 VOLUME_SCALE_WEBRTC = 0.01f; - const F32 LEVEL_SCALE_WEBRTC = 0.008f; + const F32 LEVEL_SCALE_WEBRTC = 0.015f; const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport const uint32_t MUTE_FADE_DELAY_MS = 500; // 20ms fade followed by 480ms silence gets rid of the click just after unmuting. // This is because the buffers and processing is cleared by the silence. 
@@ -781,7 +781,14 @@ bool LLWebRTCVoiceClient::inTuningMode() void LLWebRTCVoiceClient::tuningSetMicVolume(float volume) { - mTuningMicGain = volume; + if (volume != mTuningMicGain) + { + mTuningMicGain = volume; + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setTuningMicGain(volume); + } + } } void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) @@ -795,7 +802,7 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) float LLWebRTCVoiceClient::tuningGetEnergy(void) { - return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f; + return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC)/1.5f; } bool LLWebRTCVoiceClient::deviceSettingsAvailable() @@ -1127,7 +1134,7 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force) // in the UI. This is done on all sessions, so switching // sessions retains consistent volume levels. void LLWebRTCVoiceClient::updateOwnVolume() { - F32 audio_level = (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) / 2.1f; + F32 audio_level = (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) / 4.0f; sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level)); } @@ -1569,7 +1576,10 @@ void LLWebRTCVoiceClient::setMicGain(F32 gain) if (gain != mMicGain) { mMicGain = gain; - mWebRTCDeviceInterface->setMicGain(gain); + if (mWebRTCDeviceInterface) + { + mWebRTCDeviceInterface->setMicGain(gain); + } } } From 344b5cbf897dbfbb6e9e87e3396260722dcefe55 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Wed, 10 Sep 2025 18:50:29 -0700 Subject: [PATCH 11/14] Update with mac universal webrtc build --- autobuild.xml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/autobuild.xml b/autobuild.xml index 67db627962e..c9355c73c04 100644 --- a/autobuild.xml +++ b/autobuild.xml @@ -2717,11 +2717,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 8c4d1c363da56bf47178831ac9e03560e5a7e50a + 43c5f93517794aeade550e4266b959d1f0cfcb7f hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-darwin64-17623092396.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.20-universal/webrtc-m137.7151.04.20-universal.17630578914-darwin64-17630578914.tar.zst name darwin64 @@ -2731,11 +2731,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 995e116e180ff936bef7d40bcd60c845a299d414 + efc5b176d878cfc16b8f82445d82ddb96815b6ab hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-linux64-17623092396.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.20-universal/webrtc-m137.7151.04.20-universal.17630578914-linux64-17630578914.tar.zst name linux64 @@ -2745,11 +2745,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors archive hash - 08abe16a6735ab2eabc80d082f51098d6b87af19 + 1e36f100de32c7c71325497a672fb1659b3f206d hash_algorithm sha1 url - https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.15/webrtc-m137.7151.04.15.17623092396-windows64-17623092396.tar.zst + https://github.com/secondlife/3p-webrtc-build/releases/download/m137.7151.04.20-universal/webrtc-m137.7151.04.20-universal.17630578914-windows64-17630578914.tar.zst name windows64 @@ -2762,7 +2762,7 @@ Copyright 
(c) 2012, 2014, 2015, 2016 nghttp2 contributors copyright Copyright (c) 2011, The WebRTC project authors. All rights reserved. version - m137.7151.04.15.17623092396 + m137.7151.04.20-universal.17630578914 name webrtc vcs_branch From c4c0a6ba707ddc0d67f130abb649d2c0cde3b99d Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 11 Sep 2025 15:55:12 -0700 Subject: [PATCH 12/14] Tuning of voice indicators for both tuning mode and inworld for self. --- indra/llwebrtc/llwebrtc.cpp | 34 ++++++------------ indra/llwebrtc/llwebrtc_impl.h | 9 +++-- indra/newview/llvoicewebrtc.cpp | 62 +++++++++++++++++++++++++-------- 3 files changed, 65 insertions(+), 40 deletions(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 6777b2458b1..057fd319bab 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -481,18 +481,6 @@ void LLWebRTCImpl::workerDeployDevices() { mDeviceModule->ForceStartRecording(); } - uint32_t min_v = 0, max_v = 0, cur_v = 0; - bool have_hw = false; - - mDeviceModule->MicrophoneVolumeIsAvailable(&have_hw); - if (have_hw) - { - mDeviceModule->MinMicrophoneVolume(&min_v); - mDeviceModule->MaxMicrophoneVolume(&max_v); - uint32_t target = min_v + (max_v - min_v) * 8 / 10; // ~80% - mDeviceModule->SetMicrophoneVolume(target); - mDeviceModule->MicrophoneVolume(&cur_v); - } mDeviceModule->StartPlayout(); } @@ -522,11 +510,11 @@ void LLWebRTCImpl::setCaptureDevice(const std::string &id) } } - // Always deploy devices, as we may have received a device update - // for the default device, which may be the same as mRecordingDevice - // but still needs to be refreshed. - mRecordingDevice = recordingDevice; - deployDevices(); + if (mRecordingDevice != recordingDevice) + { + mRecordingDevice = recordingDevice; + deployDevices(); + } } void LLWebRTCImpl::setRenderDevice(const std::string &id) @@ -554,11 +542,11 @@ void LLWebRTCImpl::setRenderDevice(const std::string &id) } } - // Always deploy devices, as we may have received a device update - // for the default device, which may be the same as mPlayoutDevice - // but still needs to be refreshed. - mPlayoutDevice = playoutDevice; - deployDevices(); + if (mPlayoutDevice != playoutDevice) + { + mPlayoutDevice = playoutDevice; + deployDevices(); + } } // updateDevices needs to happen on the worker thread. @@ -611,7 +599,7 @@ void LLWebRTCImpl::OnDevicesUpdated() mRecordingDevice = RECORD_DEVICE_DEFAULT; mPlayoutDevice = PLAYOUT_DEVICE_DEFAULT; - updateDevices(); + deployDevices(); } diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index 4420ba14f84..ee8609007d2 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -231,9 +231,12 @@ class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule int32_t InitRecording() override { return inner_->InitRecording(); } bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); } int32_t StartRecording() override { - if (tuning_) - return 0; // For tuning, we'll force a Start when we're ready - return inner_->StartRecording(); + // ignore start recording as webrtc.lib will + // send one when streams first connect, resulting + // in an inadvertant 'recording' when mute is on. + // We take full control of StartRecording via + // ForceStartRecording below. 
+ return 0; } int32_t StopRecording() override { if (tuning_) return 0; // if we're tuning, disregard the StopRecording we get from disabling the streams diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp index 38f0fadd084..b26a48fd5f8 100644 --- a/indra/newview/llvoicewebrtc.cpp +++ b/indra/newview/llvoicewebrtc.cpp @@ -82,9 +82,12 @@ const std::string WEBRTC_VOICE_SERVER_TYPE = "webrtc"; namespace { - const F32 MAX_AUDIO_DIST = 50.0f; - const F32 VOLUME_SCALE_WEBRTC = 0.01f; - const F32 LEVEL_SCALE_WEBRTC = 0.015f; + const F32 MAX_AUDIO_DIST = 50.0f; + const F32 VOLUME_SCALE_WEBRTC = 0.01f; + const F32 TUNING_LEVEL_SCALE = 0.01f; + const F32 TUNING_LEVEL_START_POINT = 0.8f; + const F32 LEVEL_SCALE = 0.005f; + const F32 LEVEL_START_POINT = 0.18f; const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport const uint32_t MUTE_FADE_DELAY_MS = 500; // 20ms fade followed by 480ms silence gets rid of the click just after unmuting. // This is because the buffers and processing is cleared by the silence. @@ -350,25 +353,45 @@ void LLWebRTCVoiceClient::updateSettings() static LLCachedControl sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice"); setRenderDevice(sOutputDevice); - LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) << LL_ENDL; + LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) + << LL_ENDL; static LLCachedControl sMicLevel(gSavedSettings, "AudioLevelMic"); setMicGain(sMicLevel); llwebrtc::LLWebRTCDeviceInterface::AudioConfig config; + bool audioConfigChanged = false; + static LLCachedControl sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true); - config.mEchoCancellation = sEchoCancellation; + if (sEchoCancellation != config.mEchoCancellation) + { + config.mEchoCancellation = sEchoCancellation; + audioConfigChanged = true; + } static LLCachedControl sAGC(gSavedSettings, "VoiceAutomaticGainControl", true); - config.mAGC = sAGC; + if (sAGC != config.mAGC) + { + config.mAGC = sAGC; + audioConfigChanged = true; + } - static LLCachedControl sNoiseSuppressionLevel(gSavedSettings, + static LLCachedControl sNoiseSuppressionLevel( + gSavedSettings, "VoiceNoiseSuppressionLevel", llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH); - config.mNoiseSuppressionLevel = (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel; - - mWebRTCDeviceInterface->setAudioConfig(config); + auto noiseSuppressionLevel = + (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel; + if (noiseSuppressionLevel != config.mNoiseSuppressionLevel) + { + config.mNoiseSuppressionLevel = noiseSuppressionLevel; + audioConfigChanged = true; + } + if (audioConfigChanged) + { + mWebRTCDeviceInterface->setAudioConfig(config); + } } } @@ -802,7 +825,8 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume) float LLWebRTCVoiceClient::tuningGetEnergy(void) { - return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC)/1.5f; + float rms = mWebRTCDeviceInterface->getTuningAudioLevel(); + return TUNING_LEVEL_START_POINT - TUNING_LEVEL_SCALE * rms; } bool LLWebRTCVoiceClient::deviceSettingsAvailable() @@ -1133,9 +1157,14 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force) // Update our own volume on our participant, so 
it'll show up // in the UI. This is done on all sessions, so switching // sessions retains consistent volume levels. -void LLWebRTCVoiceClient::updateOwnVolume() { - F32 audio_level = (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) / 4.0f; - +void LLWebRTCVoiceClient::updateOwnVolume() +{ + F32 audio_level = 0.0f; + if (!mMuteMic) + { + float rms = mWebRTCDeviceInterface->getPeerConnectionAudioLevel(); + audio_level = LEVEL_START_POINT - LEVEL_SCALE * rms; + } sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level)); } @@ -1533,6 +1562,11 @@ void LLWebRTCVoiceClient::setMuteMic(bool muted) mMuteMic = muted; + if (mIsInTuningMode) + { + return; + } + if (mWebRTCDeviceInterface) { mWebRTCDeviceInterface->setMute(muted, muted ? MUTE_FADE_DELAY_MS : 0); // delay for 40ms on mute to allow buffers to empty From a9a18a99e0fbb3111fc8be4a09de773c2e0a6a9e Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Fri, 12 Sep 2025 13:12:09 -0700 Subject: [PATCH 13/14] Redo device deployment to handle cases where multiple deploy requests pile up Also, mute when leaving webrtc-enabled regions or parcels, and unmute when voice comes back. --- indra/llwebrtc/llwebrtc.cpp | 181 +++++++++++++++++---------------- indra/llwebrtc/llwebrtc_impl.h | 12 ++- 2 files changed, 102 insertions(+), 91 deletions(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index 057fd319bab..abf30954e3e 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -259,9 +259,7 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) : mPeerCustomProcessor(nullptr), mMute(true), mTuningMode(false), - mDevicesDeploying(false), - mPlayoutDevice(PLAYOUT_DEVICE_DEFAULT), - mRecordingDevice(RECORD_DEVICE_DEFAULT), + mDevicesDeploying(0), mGain(0.0f) { } @@ -345,7 +343,6 @@ void LLWebRTCImpl::init() [this]() { mDeviceModule->EnableBuiltInAEC(false); - workerDeployDevices(); updateDevices(); }); @@ -444,107 +441,123 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer) // must be run in the worker thread. 
void LLWebRTCImpl::workerDeployDevices() { + int16_t recordingDevice = RECORD_DEVICE_DEFAULT; +#if WEBRTC_WIN + int16_t recording_device_start = 0; +#else + int16_t recording_device_start = 1; +#endif + + if (mRecordingDevice != "Default") + { + for (int16_t i = recording_device_start; i < mRecordingDeviceList.size(); i++) + { + if (mRecordingDeviceList[i].mID == mRecordingDevice) + { + recordingDevice = i; + break; + } + } + } + mDeviceModule->StopPlayout(); mDeviceModule->ForceStopRecording(); #if WEBRTC_WIN - if (mRecordingDevice < 0) { - mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)mRecordingDevice); + if (recordingDevice < 0) + { + mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)recordingDevice); } else { - mDeviceModule->SetRecordingDevice(mRecordingDevice); + mDeviceModule->SetRecordingDevice(recordingDevice); } #else - mDeviceModule->SetRecordingDevice(mRecordingDevice); + mDeviceModule->SetRecordingDevice(recordingDevice); #endif mDeviceModule->InitMicrophone(); mDeviceModule->SetStereoRecording(false); mDeviceModule->InitRecording(); + int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; #if WEBRTC_WIN - if (mPlayoutDevice < 0) + int16_t playout_device_start = 0; +#else + int16_t playout_device_start = 1; +#endif + if (mPlayoutDevice != "Default") { - mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)mPlayoutDevice); + for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++) + { + if (mPlayoutDeviceList[i].mID == mPlayoutDevice) + { + playoutDevice = i; + break; + } + } + } + +#if WEBRTC_WIN + if (playoutDevice < 0) + { + mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)playoutDevice); } else { - mDeviceModule->SetPlayoutDevice(mPlayoutDevice); + mDeviceModule->SetPlayoutDevice(playoutDevice); } #else - mDeviceModule->SetPlayoutDevice(mPlayoutDevice); + mDeviceModule->SetPlayoutDevice(playoutDevice); #endif mDeviceModule->InitSpeaker(); mDeviceModule->SetStereoPlayout(true); mDeviceModule->InitPlayout(); - if (!mMute || mTuningMode) + if ((!mMute && mPeerConnections.size()) || mTuningMode) { mDeviceModule->ForceStartRecording(); } - mDeviceModule->StartPlayout(); -} -void LLWebRTCImpl::setCaptureDevice(const std::string &id) -{ - int16_t recordingDevice = RECORD_DEVICE_DEFAULT; -#if WEBRTC_WIN - int16_t device_start = 0; -#else - if (mRecordingDeviceList.size()) + if (!mTuningMode) { - // no recording devices - return; + mDeviceModule->StartPlayout(); } - int16_t device_start = 1; -#endif - - if (id != "Default") - { - for (int16_t i = device_start; i < mRecordingDeviceList.size(); i++) + mSignalingThread->PostTask( + [this] { - if (mRecordingDeviceList[i].mID == id) + for (auto& connection : mPeerConnections) { - recordingDevice = i; - break; + if (mTuningMode) + { + connection->enableSenderTracks(false); + } + else + { + connection->resetMute(); + } + connection->enableReceiverTracks(!mTuningMode); } - } - } + if (1 < mDevicesDeploying.fetch_sub(1, std::memory_order_relaxed)) + { + mWorkerThread->PostTask([this] { workerDeployDevices(); }); + } + }); +} + +void LLWebRTCImpl::setCaptureDevice(const std::string &id) +{ - if (mRecordingDevice != recordingDevice) + if (mRecordingDevice != id) { - mRecordingDevice = recordingDevice; + mRecordingDevice = id; deployDevices(); } } void LLWebRTCImpl::setRenderDevice(const std::string &id) { - int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT; -#if WEBRTC_WIN - int16_t device_start = 0; -#else - if 
(mPlayoutDeviceList.size()) - { - // no playout devices - return; - } - int16_t device_start = 1; -#endif - if (id != "Default") - { - for (int16_t i = device_start; i < mPlayoutDeviceList.size(); i++) - { - if (mPlayoutDeviceList[i].mID == id) - { - playoutDevice = i; - break; - } - } - } - - if (mPlayoutDevice != playoutDevice) + if (mPlayoutDevice != id) { - mPlayoutDevice = playoutDevice; + mPlayoutDevice = id; deployDevices(); } } @@ -596,9 +609,6 @@ void LLWebRTCImpl::updateDevices() void LLWebRTCImpl::OnDevicesUpdated() { - mRecordingDevice = RECORD_DEVICE_DEFAULT; - mPlayoutDevice = PLAYOUT_DEVICE_DEFAULT; - deployDevices(); } @@ -631,31 +641,13 @@ void LLWebRTCImpl::setTuningMode(bool enable) void LLWebRTCImpl::deployDevices() { - if (mDevicesDeploying) + if (0 < mDevicesDeploying.fetch_add(1, std::memory_order_relaxed)) { return; } - mDevicesDeploying = true; mWorkerThread->PostTask( [this] { workerDeployDevices(); - mSignalingThread->PostTask( - [this] - { - for (auto &connection : mPeerConnections) - { - if (mTuningMode) - { - connection->enableSenderTracks(false); - } - else - { - connection->resetMute(); - } - connection->enableReceiverTracks(!mTuningMode); - } - mDevicesDeploying = false; - }); }); } @@ -690,7 +682,15 @@ void LLWebRTCImpl::setMicGain(float gain) void LLWebRTCImpl::setMute(bool mute, int delay_ms) { - mMute = mute; + if (mMute != mute) + { + mMute = mute; + intSetMute(mute, delay_ms); + } +} + +void LLWebRTCImpl::intSetMute(bool mute, int delay_ms) +{ if (mPeerCustomProcessor) { mPeerCustomProcessor->setGain(mMute ? 0.0f : mGain); @@ -730,9 +730,14 @@ LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() bool empty = mPeerConnections.empty(); webrtc::scoped_refptr peerConnection = webrtc::scoped_refptr(new webrtc::RefCountedObject()); peerConnection->init(this); - + if (mPeerConnections.empty()) + { + intSetMute(mMute); + } mPeerConnections.emplace_back(peerConnection); + peerConnection->enableSenderTracks(false); + peerConnection->resetMute(); return peerConnection.get(); } @@ -743,6 +748,10 @@ void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_conn if (it != mPeerConnections.end()) { mPeerConnections.erase(it); + if (mPeerConnections.empty()) + { + intSetMute(true); + } } } diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h index ee8609007d2..51d42c82b24 100644 --- a/indra/llwebrtc/llwebrtc_impl.h +++ b/indra/llwebrtc/llwebrtc_impl.h @@ -448,6 +448,8 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO void setMute(bool mute, int delay_ms = 20) override; + void intSetMute(bool mute, int delay_ms = 20); + // // AudioDeviceObserver // @@ -523,22 +525,22 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceO webrtc::scoped_refptr mAudioProcessingModule; // more native webrtc stuff - std::unique_ptr mTaskQueueFactory; + std::unique_ptr mTaskQueueFactory; // Devices void updateDevices(); void deployDevices(); - bool mDevicesDeploying; - webrtc::scoped_refptr mDeviceModule; + std::atomic mDevicesDeploying; + webrtc::scoped_refptr mDeviceModule; std::vector mVoiceDevicesObserverList; // accessors in native webrtc for devices aren't apparently implemented yet. 
bool mTuningMode; - int32_t mRecordingDevice; + std::string mRecordingDevice; LLWebRTCVoiceDeviceList mRecordingDeviceList; - int32_t mPlayoutDevice; + std::string mPlayoutDevice; LLWebRTCVoiceDeviceList mPlayoutDeviceList; bool mMute; float mGain; From faec86ff3405af9c47c1dc1495d08ab28a417c14 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Fri, 12 Sep 2025 13:23:28 -0700 Subject: [PATCH 14/14] Fix pre-commit whitespace issue --- indra/llwebrtc/llwebrtc.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp index abf30954e3e..edba2bee9ad 100644 --- a/indra/llwebrtc/llwebrtc.cpp +++ b/indra/llwebrtc/llwebrtc.cpp @@ -735,7 +735,7 @@ LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection() intSetMute(mMute); } mPeerConnections.emplace_back(peerConnection); - + peerConnection->enableSenderTracks(false); peerConnection->resetMute(); return peerConnection.get();