diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 8689f2308ec..4d2b9755042 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -93,7 +93,6 @@ jobs:
uses: actions/setup-python@v5
with:
python-version: "3.11"
-
- name: Checkout build variables
uses: actions/checkout@v4
with:
diff --git a/autobuild.xml b/autobuild.xml
index d1d2f735aff..c9355c73c04 100644
--- a/autobuild.xml
+++ b/autobuild.xml
@@ -2717,11 +2717,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors
archive
name
darwin64
@@ -2731,11 +2731,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors
archive
name
linux64
@@ -2745,11 +2745,11 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors
archive
name
windows64
@@ -2762,7 +2762,7 @@ Copyright (c) 2012, 2014, 2015, 2016 nghttp2 contributors
copyright
Copyright (c) 2011, The WebRTC project authors. All rights reserved.
version
- m114.5735.08.73-alpha.11958809572
+ m137.7151.04.20-universal.17630578914
name
webrtc
vcs_branch
diff --git a/indra/llwebrtc/CMakeLists.txt b/indra/llwebrtc/CMakeLists.txt
index 4fde489942c..eb10f4eee49 100644
--- a/indra/llwebrtc/CMakeLists.txt
+++ b/indra/llwebrtc/CMakeLists.txt
@@ -42,7 +42,7 @@ if (WINDOWS)
iphlpapi
libcmt)
# as the webrtc libraries are release, build this binary as release as well.
- target_compile_options(llwebrtc PRIVATE "/MT")
+ target_compile_options(llwebrtc PRIVATE "/MT" "/Zc:wchar_t")
if (USE_BUGSPLAT)
set_target_properties(llwebrtc PROPERTIES PDB_OUTPUT_DIRECTORY "${SYMBOLS_STAGING_DIR}")
endif (USE_BUGSPLAT)
diff --git a/indra/llwebrtc/llwebrtc.cpp b/indra/llwebrtc/llwebrtc.cpp
index 20951ff8167..edba2bee9ad 100644
--- a/indra/llwebrtc/llwebrtc.cpp
+++ b/indra/llwebrtc/llwebrtc.cpp
@@ -9,7 +9,7 @@
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
- * version 2.1 of the License only.
+ * version 2.1 of the License only.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
@@ -32,41 +32,79 @@
#include "api/audio_codecs/audio_encoder_factory.h"
#include "api/audio_codecs/builtin_audio_decoder_factory.h"
#include "api/audio_codecs/builtin_audio_encoder_factory.h"
+#include "api/audio/builtin_audio_processing_builder.h"
#include "api/media_stream_interface.h"
#include "api/media_stream_track.h"
#include "modules/audio_processing/audio_buffer.h"
#include "modules/audio_mixer/audio_mixer_impl.h"
+#include "api/environment/environment_factory.h"
namespace llwebrtc
{
+#if WEBRTC_WIN
+static int16_t PLAYOUT_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice;
+static int16_t RECORD_DEVICE_DEFAULT = webrtc::AudioDeviceModule::kDefaultDevice;
+#else
+static int16_t PLAYOUT_DEVICE_DEFAULT = 0;
+static int16_t RECORD_DEVICE_DEFAULT = 0;
+#endif
-static int16_t PLAYOUT_DEVICE_DEFAULT = -1;
-static int16_t PLAYOUT_DEVICE_BAD = -2;
-static int16_t RECORD_DEVICE_DEFAULT = -1;
-static int16_t RECORD_DEVICE_BAD = -2;
-LLAudioDeviceObserver::LLAudioDeviceObserver() : mSumVector {0}, mMicrophoneEnergy(0.0) {}
+//
+// LLWebRTCAudioTransport implementation
+//
-float LLAudioDeviceObserver::getMicrophoneEnergy() { return mMicrophoneEnergy; }
+LLWebRTCAudioTransport::LLWebRTCAudioTransport() : mMicrophoneEnergy(0.0)
+{
+ memset(mSumVector, 0, sizeof(mSumVector));
+}
-// TODO: Pull smoothing/filtering code into a common helper function
-// for LLAudioDeviceObserver and LLCustomProcessor
+void LLWebRTCAudioTransport::SetEngineTransport(webrtc::AudioTransport* t)
+{
+ engine_.store(t, std::memory_order_release);
+}
-void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec)
+int32_t LLWebRTCAudioTransport::RecordedDataIsAvailable(const void* audio_data,
+ size_t number_of_frames,
+ size_t bytes_per_frame,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ uint32_t total_delay_ms,
+ int32_t clock_drift,
+ uint32_t current_mic_level,
+ bool key_pressed,
+ uint32_t& new_mic_level)
{
+ auto* engine = engine_.load(std::memory_order_acquire);
+
+ // 1) Deliver to engine (authoritative).
+ int32_t ret = 0;
+ if (engine)
+ {
+ ret = engine->RecordedDataIsAvailable(audio_data,
+ number_of_frames,
+ bytes_per_frame,
+ number_of_channels,
+ samples_per_sec,
+ total_delay_ms,
+ clock_drift,
+ current_mic_level,
+ key_pressed,
+ new_mic_level);
+ }
+
+ // 2) Calculate energy for microphone level monitoring
// calculate the energy
float energy = 0;
- const short *samples = (const short *) audio_samples;
- for (size_t index = 0; index < num_samples * num_channels; index++)
+ const short *samples = (const short *) audio_data;
+
+ for (size_t index = 0; index < number_of_frames * number_of_channels; index++)
{
float sample = (static_cast<float>(samples[index]) / (float) 32767);
energy += sample * sample;
}
-
+ float gain = mGain.load(std::memory_order_relaxed);
+ energy = energy * gain * gain;
// smooth it.
size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]);
float totalSum = 0;
@@ -78,18 +116,59 @@ void LLAudioDeviceObserver::OnCaptureData(const void *audio_samples,
}
mSumVector[i] = energy;
totalSum += energy;
- mMicrophoneEnergy = std::sqrt(totalSum / (num_samples * buffer_size));
+ mMicrophoneEnergy = std::sqrt(totalSum / (number_of_frames * number_of_channels * buffer_size));
+
+ return ret;
}
-void LLAudioDeviceObserver::OnRenderData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec)
+int32_t LLWebRTCAudioTransport::NeedMorePlayData(size_t number_of_frames,
+ size_t bytes_per_frame,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ void* audio_data,
+ size_t& number_of_samples_out,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms)
{
+ auto* engine = engine_.load(std::memory_order_acquire);
+ if (!engine)
+ {
+ // No engine sink; output silence to be safe.
+ const size_t bytes = number_of_frames * bytes_per_frame;
+ memset(audio_data, 0, bytes);
+ number_of_samples_out = number_of_frames;
+ return 0;
+ }
+
+ // Only the engine should fill the buffer.
+ return engine->NeedMorePlayData(number_of_frames,
+ bytes_per_frame,
+ number_of_channels,
+ samples_per_sec,
+ audio_data,
+ number_of_samples_out,
+ elapsed_time_ms,
+ ntp_time_ms);
}
-LLCustomProcessor::LLCustomProcessor() : mSampleRateHz(0), mNumChannels(0), mMicrophoneEnergy(0.0), mGain(1.0)
+void LLWebRTCAudioTransport::PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms)
+{
+ auto* engine = engine_.load(std::memory_order_acquire);
+
+ if (engine)
+ {
+ engine
+ ->PullRenderData(bits_per_sample, sample_rate, number_of_channels, number_of_frames, audio_data, elapsed_time_ms, ntp_time_ms);
+ }
+}
+
+LLCustomProcessor::LLCustomProcessor(LLCustomProcessorStatePtr state) : mSampleRateHz(0), mNumChannels(0), mState(state)
{
memset(mSumVector, 0, sizeof(mSumVector));
}
@@ -101,40 +180,61 @@ void LLCustomProcessor::Initialize(int sample_rate_hz, int num_channels)
memset(mSumVector, 0, sizeof(mSumVector));
}
-void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in)
+void LLCustomProcessor::Process(webrtc::AudioBuffer *audio)
{
- webrtc::StreamConfig stream_config;
- stream_config.set_sample_rate_hz(mSampleRateHz);
- stream_config.set_num_channels(mNumChannels);
- std::vector frame;
- std::vector frame_samples;
-
- if (audio_in->num_channels() < 1 || audio_in->num_frames() < 480)
+ if (audio->num_channels() < 1 || audio->num_frames() < 480)
{
return;
}
- // grab the input audio
- frame_samples.resize(stream_config.num_samples());
- frame.resize(stream_config.num_channels());
- for (size_t ch = 0; ch < stream_config.num_channels(); ++ch)
+ // calculate the energy
+
+ float desired_gain = mState->getGain();
+ if (mState->getDirty())
{
- frame[ch] = &(frame_samples)[ch * stream_config.num_frames()];
+ // We'll delay ramping by 30ms in order to clear out buffers that may
+ // have had content before muting. And for the last 20ms, we'll ramp
+ // down or up smoothly.
+ mRampFrames = 5;
+
+ // we've changed our desired gain, so set the incremental
+ // gain change so that we smoothly step over 20ms
+ mGainStep = (desired_gain - mCurrentGain) / (mSampleRateHz / 50);
}
- audio_in->CopyTo(stream_config, &frame[0]);
-
- // calculate the energy
- float energy = 0;
- for (size_t index = 0; index < stream_config.num_samples(); index++)
+ if (mRampFrames)
{
- float sample = frame_samples[index];
- sample = sample * mGain; // apply gain
- frame_samples[index] = sample; // write processed sample back to buffer.
- energy += sample * sample;
+ if (mRampFrames-- > 2)
+ {
+ // don't change the gain if we're still in the 'don't move' phase
+ mGainStep = 0.0f;
+ }
+ }
+ else
+ {
+ // We've ramped all the way down, so don't step the gain any more and
+ // just maintain the current gain.
+ mGainStep = 0.0f;
+ mCurrentGain = desired_gain;
}
- audio_in->CopyFrom(&frame[0], stream_config);
+ float energy = 0;
+
+ auto chans = audio->channels();
+ for (size_t ch = 0; ch < audio->num_channels(); ch++)
+ {
+ float* frame_samples = chans[ch];
+ float gain = mCurrentGain;
+ for (size_t index = 0; index < audio->num_frames(); index++)
+ {
+ float sample = frame_samples[index];
+ sample = sample * gain; // apply gain
+ frame_samples[index] = sample; // write processed sample back to buffer.
+ energy += sample * sample;
+ gain += mGainStep;
+ }
+ }
+ mCurrentGain += audio->num_frames() * mGainStep;
// smooth it.
size_t buffer_size = sizeof(mSumVector) / sizeof(mSumVector[0]);
@@ -147,7 +247,7 @@ void LLCustomProcessor::Process(webrtc::AudioBuffer *audio_in)
}
mSumVector[i] = energy;
totalSum += energy;
- mMicrophoneEnergy = std::sqrt(totalSum / (stream_config.num_samples() * buffer_size));
+ mState->setMicrophoneEnergy(std::sqrt(totalSum / (audio->num_channels() * audio->num_frames() * buffer_size)));
}
//
@@ -159,89 +259,54 @@ LLWebRTCImpl::LLWebRTCImpl(LLWebRTCLogCallback* logCallback) :
mPeerCustomProcessor(nullptr),
mMute(true),
mTuningMode(false),
- mPlayoutDevice(0),
- mRecordingDevice(0),
- mTuningAudioDeviceObserver(nullptr)
+ mDevicesDeploying(0),
+ mGain(0.0f)
{
}
void LLWebRTCImpl::init()
{
- mPlayoutDevice = 0;
- mRecordingDevice = 0;
- rtc::InitializeSSL();
+ webrtc::InitializeSSL();
// Normal logging is rather spammy, so turn it off.
- rtc::LogMessage::LogToDebug(rtc::LS_NONE);
- rtc::LogMessage::SetLogToStderr(true);
- rtc::LogMessage::AddLogToStream(mLogSink, rtc::LS_VERBOSE);
+ webrtc::LogMessage::LogToDebug(webrtc::LS_NONE);
+ webrtc::LogMessage::SetLogToStderr(true);
+ webrtc::LogMessage::AddLogToStream(mLogSink, webrtc::LS_VERBOSE);
mTaskQueueFactory = webrtc::CreateDefaultTaskQueueFactory();
// Create the native threads.
- mNetworkThread = rtc::Thread::CreateWithSocketServer();
+ mNetworkThread = webrtc::Thread::CreateWithSocketServer();
mNetworkThread->SetName("WebRTCNetworkThread", nullptr);
mNetworkThread->Start();
- mWorkerThread = rtc::Thread::Create();
+ mWorkerThread = webrtc::Thread::Create();
mWorkerThread->SetName("WebRTCWorkerThread", nullptr);
mWorkerThread->Start();
- mSignalingThread = rtc::Thread::Create();
+ mSignalingThread = webrtc::Thread::Create();
mSignalingThread->SetName("WebRTCSignalingThread", nullptr);
mSignalingThread->Start();
- mTuningAudioDeviceObserver = new LLAudioDeviceObserver;
- mWorkerThread->PostTask(
- [this]()
- {
- // Initialize the audio devices on the Worker Thread
- mTuningDeviceModule =
- webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio,
- mTaskQueueFactory.get(),
- std::unique_ptr(mTuningAudioDeviceObserver));
-
- mTuningDeviceModule->Init();
- mTuningDeviceModule->SetPlayoutDevice(mPlayoutDevice);
- mTuningDeviceModule->SetRecordingDevice(mRecordingDevice);
- mTuningDeviceModule->EnableBuiltInAEC(false);
- mTuningDeviceModule->SetAudioDeviceSink(this);
- mTuningDeviceModule->InitMicrophone();
- mTuningDeviceModule->InitSpeaker();
- mTuningDeviceModule->SetStereoRecording(false);
- mTuningDeviceModule->SetStereoPlayout(true);
- mTuningDeviceModule->InitRecording();
- mTuningDeviceModule->InitPlayout();
- updateDevices();
- });
-
mWorkerThread->BlockingCall(
[this]()
{
- // the peer device module doesn't need an observer
- // as we pull peer data after audio processing.
- mPeerDeviceModule = webrtc::CreateAudioDeviceWithDataObserver(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio,
- mTaskQueueFactory.get(),
- nullptr);
- mPeerDeviceModule->Init();
- mPeerDeviceModule->SetPlayoutDevice(mPlayoutDevice);
- mPeerDeviceModule->SetRecordingDevice(mRecordingDevice);
- mPeerDeviceModule->EnableBuiltInAEC(false);
- mPeerDeviceModule->InitMicrophone();
- mPeerDeviceModule->InitSpeaker();
+ webrtc::scoped_refptr<webrtc::AudioDeviceModule> realADM =
+ webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::AudioLayer::kPlatformDefaultAudio, mTaskQueueFactory.get());
+ mDeviceModule = webrtc::make_ref_counted(realADM);
+ mDeviceModule->SetObserver(this);
});
// The custom processor allows us to retrieve audio data (and levels)
// from after other audio processing such as AEC, AGC, etc.
- mPeerCustomProcessor = new LLCustomProcessor;
- webrtc::AudioProcessingBuilder apb;
- apb.SetCapturePostProcessing(std::unique_ptr(mPeerCustomProcessor));
- mAudioProcessingModule = apb.Create();
+ mPeerCustomProcessor = std::make_shared<LLCustomProcessorState>();
+ webrtc::BuiltinAudioProcessingBuilder apb;
+ apb.SetCapturePostProcessing(std::make_unique<LLCustomProcessor>(mPeerCustomProcessor));
+ mAudioProcessingModule = apb.Build(webrtc::CreateEnvironment());
webrtc::AudioProcessing::Config apm_config;
apm_config.echo_canceller.enabled = false;
apm_config.echo_canceller.mobile_mode = false;
apm_config.gain_controller1.enabled = false;
- apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
- apm_config.gain_controller2.enabled = false;
+ apm_config.gain_controller2.enabled = true;
apm_config.high_pass_filter.enabled = true;
apm_config.noise_suppression.enabled = true;
apm_config.noise_suppression.level = webrtc::AudioProcessing::Config::NoiseSuppression::kVeryHigh;
@@ -252,6 +317,7 @@ void LLWebRTCImpl::init()
mAudioProcessingModule->ApplyConfig(apm_config);
webrtc::ProcessingConfig processing_config;
+
processing_config.input_stream().set_num_channels(2);
processing_config.input_stream().set_sample_rate_hz(48000);
processing_config.output_stream().set_num_channels(2);
@@ -266,13 +332,19 @@ void LLWebRTCImpl::init()
mPeerConnectionFactory = webrtc::CreatePeerConnectionFactory(mNetworkThread.get(),
mWorkerThread.get(),
mSignalingThread.get(),
- mPeerDeviceModule,
+ mDeviceModule,
webrtc::CreateBuiltinAudioEncoderFactory(),
webrtc::CreateBuiltinAudioDecoderFactory(),
nullptr /* video_encoder_factory */,
nullptr /* video_decoder_factory */,
nullptr /* audio_mixer */,
mAudioProcessingModule);
+ mWorkerThread->PostTask(
+ [this]()
+ {
+ mDeviceModule->EnableBuiltInAEC(false);
+ updateDevices();
+ });
}
@@ -294,64 +366,16 @@ void LLWebRTCImpl::terminate()
mWorkerThread->BlockingCall(
[this]()
{
- if (mTuningDeviceModule)
- {
- mTuningDeviceModule->StopRecording();
- mTuningDeviceModule->Terminate();
- }
- if (mPeerDeviceModule)
+ if (mDeviceModule)
{
- mPeerDeviceModule->StopRecording();
- mPeerDeviceModule->Terminate();
- }
- mTuningDeviceModule = nullptr;
- mPeerDeviceModule = nullptr;
- mTaskQueueFactory = nullptr;
- });
- rtc::LogMessage::RemoveLogToStream(mLogSink);
-}
-
-//
-// Devices functions
-//
-// Most device-related functionality needs to happen
-// on the worker thread (the audio thread,) so those calls will be
-// proxied over to that thread.
-//
-void LLWebRTCImpl::setRecording(bool recording)
-{
- mWorkerThread->PostTask(
- [this, recording]()
- {
- if (recording)
- {
- mPeerDeviceModule->SetStereoRecording(false);
- mPeerDeviceModule->InitRecording();
- mPeerDeviceModule->StartRecording();
- }
- else
- {
- mPeerDeviceModule->StopRecording();
- }
- });
-}
-
-void LLWebRTCImpl::setPlayout(bool playing)
-{
- mWorkerThread->PostTask(
- [this, playing]()
- {
- if (playing)
- {
- mPeerDeviceModule->SetStereoPlayout(true);
- mPeerDeviceModule->InitPlayout();
- mPeerDeviceModule->StartPlayout();
- }
- else
- {
- mPeerDeviceModule->StopPlayout();
+ mDeviceModule->StopRecording();
+ mDeviceModule->StopPlayout();
+ mDeviceModule->Terminate();
}
+ mDeviceModule = nullptr;
+ mTaskQueueFactory = nullptr;
});
+ webrtc::LogMessage::RemoveLogToStream(mLogSink);
}
void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config)
@@ -359,9 +383,9 @@ void LLWebRTCImpl::setAudioConfig(LLWebRTCDeviceInterface::AudioConfig config)
webrtc::AudioProcessing::Config apm_config;
apm_config.echo_canceller.enabled = config.mEchoCancellation;
apm_config.echo_canceller.mobile_mode = false;
- apm_config.gain_controller1.enabled = config.mAGC;
- apm_config.gain_controller1.mode = webrtc::AudioProcessing::Config::GainController1::kAdaptiveAnalog;
- apm_config.gain_controller2.enabled = false;
+ apm_config.gain_controller1.enabled = false;
+ apm_config.gain_controller2.enabled = config.mAGC;
+ apm_config.gain_controller2.adaptive_digital.enabled = true; // auto-level speech
apm_config.high_pass_filter.enabled = true;
apm_config.transient_suppression.enabled = true;
apm_config.pipeline.multi_channel_render = true;
@@ -414,142 +438,134 @@ void LLWebRTCImpl::unsetDevicesObserver(LLWebRTCDevicesObserver *observer)
}
}
-void ll_set_device_module_capture_device(rtc::scoped_refptr device_module, int16_t device)
+// must be run in the worker thread.
+void LLWebRTCImpl::workerDeployDevices()
{
+ int16_t recordingDevice = RECORD_DEVICE_DEFAULT;
#if WEBRTC_WIN
- if (device < 0)
- {
- device_module->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultDevice);
- }
- else
- {
- device_module->SetRecordingDevice(device);
- }
+ int16_t recording_device_start = 0;
#else
- // passed in default is -1, but the device list
- // has it at 0
- device_module->SetRecordingDevice(device + 1);
+ int16_t recording_device_start = 1;
#endif
- device_module->InitMicrophone();
-}
-void LLWebRTCImpl::setCaptureDevice(const std::string &id)
-{
- int16_t recordingDevice = RECORD_DEVICE_DEFAULT;
- if (id != "Default")
+ if (mRecordingDevice != "Default")
{
- for (int16_t i = 0; i < mRecordingDeviceList.size(); i++)
+ for (int16_t i = recording_device_start; i < mRecordingDeviceList.size(); i++)
{
- if (mRecordingDeviceList[i].mID == id)
+ if (mRecordingDeviceList[i].mID == mRecordingDevice)
{
recordingDevice = i;
break;
}
}
}
- if (recordingDevice == mRecordingDevice)
- {
- return;
- }
- mRecordingDevice = recordingDevice;
- if (mTuningMode)
- {
- mWorkerThread->PostTask([this, recordingDevice]()
- {
- ll_set_device_module_capture_device(mTuningDeviceModule, recordingDevice);
- });
- }
- else
- {
- mWorkerThread->PostTask([this, recordingDevice]()
- {
- bool recording = mPeerDeviceModule->Recording();
- if (recording)
- {
- mPeerDeviceModule->StopRecording();
- }
- ll_set_device_module_capture_device(mPeerDeviceModule, recordingDevice);
- if (recording)
- {
- mPeerDeviceModule->SetStereoRecording(false);
- mPeerDeviceModule->InitRecording();
- mPeerDeviceModule->StartRecording();
- }
- });
- }
-}
-
-void ll_set_device_module_render_device(rtc::scoped_refptr device_module, int16_t device)
-{
+ mDeviceModule->StopPlayout();
+ mDeviceModule->ForceStopRecording();
#if WEBRTC_WIN
- if (device < 0)
+ if (recordingDevice < 0)
{
- device_module->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultDevice);
+ mDeviceModule->SetRecordingDevice((webrtc::AudioDeviceModule::WindowsDeviceType)recordingDevice);
}
else
{
- device_module->SetPlayoutDevice(device);
+ mDeviceModule->SetRecordingDevice(recordingDevice);
}
#else
- device_module->SetPlayoutDevice(device + 1);
+ mDeviceModule->SetRecordingDevice(recordingDevice);
#endif
- device_module->InitSpeaker();
-}
+ mDeviceModule->InitMicrophone();
+ mDeviceModule->SetStereoRecording(false);
+ mDeviceModule->InitRecording();
-void LLWebRTCImpl::setRenderDevice(const std::string &id)
-{
int16_t playoutDevice = PLAYOUT_DEVICE_DEFAULT;
- if (id != "Default")
+#if WEBRTC_WIN
+ int16_t playout_device_start = 0;
+#else
+ int16_t playout_device_start = 1;
+#endif
+ if (mPlayoutDevice != "Default")
{
- for (int16_t i = 0; i < mPlayoutDeviceList.size(); i++)
+ for (int16_t i = playout_device_start; i < mPlayoutDeviceList.size(); i++)
{
- if (mPlayoutDeviceList[i].mID == id)
+ if (mPlayoutDeviceList[i].mID == mPlayoutDevice)
{
playoutDevice = i;
break;
}
}
}
- if (playoutDevice == mPlayoutDevice)
+
+#if WEBRTC_WIN
+ if (playoutDevice < 0)
{
- return;
+ mDeviceModule->SetPlayoutDevice((webrtc::AudioDeviceModule::WindowsDeviceType)playoutDevice);
+ }
+ else
+ {
+ mDeviceModule->SetPlayoutDevice(playoutDevice);
}
- mPlayoutDevice = playoutDevice;
+#else
+ mDeviceModule->SetPlayoutDevice(playoutDevice);
+#endif
+ mDeviceModule->InitSpeaker();
+ mDeviceModule->SetStereoPlayout(true);
+ mDeviceModule->InitPlayout();
- if (mTuningMode)
+ if ((!mMute && mPeerConnections.size()) || mTuningMode)
{
- mWorkerThread->PostTask(
- [this, playoutDevice]()
- {
- ll_set_device_module_render_device(mTuningDeviceModule, playoutDevice);
- });
+ mDeviceModule->ForceStartRecording();
}
- else
+
+ if (!mTuningMode)
{
- mWorkerThread->PostTask(
- [this, playoutDevice]()
+ mDeviceModule->StartPlayout();
+ }
+ mSignalingThread->PostTask(
+ [this]
+ {
+ for (auto& connection : mPeerConnections)
{
- bool playing = mPeerDeviceModule->Playing();
- if (playing)
+ if (mTuningMode)
{
- mPeerDeviceModule->StopPlayout();
+ connection->enableSenderTracks(false);
}
- ll_set_device_module_render_device(mPeerDeviceModule, playoutDevice);
- if (playing)
+ else
{
- mPeerDeviceModule->SetStereoPlayout(true);
- mPeerDeviceModule->InitPlayout();
- mPeerDeviceModule->StartPlayout();
+ connection->resetMute();
}
- });
+ connection->enableReceiverTracks(!mTuningMode);
+ }
+ if (1 < mDevicesDeploying.fetch_sub(1, std::memory_order_relaxed))
+ {
+ mWorkerThread->PostTask([this] { workerDeployDevices(); });
+ }
+ });
+}
+
+void LLWebRTCImpl::setCaptureDevice(const std::string &id)
+{
+
+ if (mRecordingDevice != id)
+ {
+ mRecordingDevice = id;
+ deployDevices();
+ }
+}
+
+void LLWebRTCImpl::setRenderDevice(const std::string &id)
+{
+ if (mPlayoutDevice != id)
+ {
+ mPlayoutDevice = id;
+ deployDevices();
}
}
// updateDevices needs to happen on the worker thread.
void LLWebRTCImpl::updateDevices()
{
- int16_t renderDeviceCount = mTuningDeviceModule->PlayoutDevices();
+ int16_t renderDeviceCount = mDeviceModule->PlayoutDevices();
mPlayoutDeviceList.clear();
#if WEBRTC_WIN
@@ -563,11 +579,11 @@ void LLWebRTCImpl::updateDevices()
{
char name[webrtc::kAdmMaxDeviceNameSize];
char guid[webrtc::kAdmMaxGuidSize];
- mTuningDeviceModule->PlayoutDeviceName(index, name, guid);
+ mDeviceModule->PlayoutDeviceName(index, name, guid);
mPlayoutDeviceList.emplace_back(name, guid);
}
- int16_t captureDeviceCount = mTuningDeviceModule->RecordingDevices();
+ int16_t captureDeviceCount = mDeviceModule->RecordingDevices();
mRecordingDeviceList.clear();
#if WEBRTC_WIN
@@ -581,7 +597,7 @@ void LLWebRTCImpl::updateDevices()
{
char name[webrtc::kAdmMaxDeviceNameSize];
char guid[webrtc::kAdmMaxGuidSize];
- mTuningDeviceModule->RecordingDeviceName(index, name, guid);
+ mDeviceModule->RecordingDeviceName(index, name, guid);
mRecordingDeviceList.emplace_back(name, guid);
}
@@ -593,11 +609,7 @@ void LLWebRTCImpl::updateDevices()
void LLWebRTCImpl::OnDevicesUpdated()
{
- // reset these to a bad value so an update is forced
- mRecordingDevice = RECORD_DEVICE_BAD;
- mPlayoutDevice = PLAYOUT_DEVICE_BAD;
-
- updateDevices();
+ deployDevices();
}
@@ -605,60 +617,109 @@ void LLWebRTCImpl::setTuningMode(bool enable)
{
mTuningMode = enable;
mWorkerThread->PostTask(
- [this, enable] {
- if (enable)
- {
- mPeerDeviceModule->StopRecording();
- mPeerDeviceModule->StopPlayout();
- ll_set_device_module_render_device(mTuningDeviceModule, mPlayoutDevice);
- ll_set_device_module_capture_device(mTuningDeviceModule, mRecordingDevice);
- mTuningDeviceModule->InitPlayout();
- mTuningDeviceModule->InitRecording();
- mTuningDeviceModule->StartRecording();
- // TODO: Starting Playout on the TDM appears to create an audio artifact (click)
- // in this case, so disabling it for now. We may have to do something different
- // if we enable 'echo playback' via the TDM when tuning.
- //mTuningDeviceModule->StartPlayout();
- }
- else
- {
- mTuningDeviceModule->StopRecording();
- //mTuningDeviceModule->StopPlayout();
- ll_set_device_module_render_device(mPeerDeviceModule, mPlayoutDevice);
- ll_set_device_module_capture_device(mPeerDeviceModule, mRecordingDevice);
- mPeerDeviceModule->SetStereoPlayout(true);
- mPeerDeviceModule->SetStereoRecording(false);
- mPeerDeviceModule->InitPlayout();
- mPeerDeviceModule->InitRecording();
- mPeerDeviceModule->StartPlayout();
- mPeerDeviceModule->StartRecording();
- }
- }
- );
- mSignalingThread->PostTask(
- [this, enable]
+ [this]
{
- for (auto &connection : mPeerConnections)
- {
- if (enable)
+ mDeviceModule->SetTuning(mTuningMode, mMute);
+ mSignalingThread->PostTask(
+ [this]
{
- connection->enableSenderTracks(false);
- }
- else
- {
- connection->resetMute();
- }
- connection->enableReceiverTracks(!enable);
- }
+ for (auto& connection : mPeerConnections)
+ {
+ if (mTuningMode)
+ {
+ connection->enableSenderTracks(false);
+ }
+ else
+ {
+ connection->resetMute();
+ }
+ connection->enableReceiverTracks(!mTuningMode);
+ }
+ });
+ });
+}
+
+void LLWebRTCImpl::deployDevices()
+{
+ if (0 < mDevicesDeploying.fetch_add(1, std::memory_order_relaxed))
+ {
+ return;
+ }
+ mWorkerThread->PostTask(
+ [this] {
+ workerDeployDevices();
});
}
-float LLWebRTCImpl::getTuningAudioLevel() { return -20 * log10f(mTuningAudioDeviceObserver->getMicrophoneEnergy()); }
+float LLWebRTCImpl::getTuningAudioLevel()
+{
+ return mDeviceModule ? -20 * log10f(mDeviceModule->GetMicrophoneEnergy()) : std::numeric_limits<float>::infinity();
+}
-float LLWebRTCImpl::getPeerConnectionAudioLevel() { return -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy()); }
+void LLWebRTCImpl::setTuningMicGain(float gain)
+{
+ if (mTuningMode && mDeviceModule)
+ {
+ mDeviceModule->SetTuningMicGain(gain);
+ }
+}
+
+float LLWebRTCImpl::getPeerConnectionAudioLevel()
+{
+ return mTuningMode ? std::numeric_limits<float>::infinity()
+ : (mPeerCustomProcessor ? -20 * log10f(mPeerCustomProcessor->getMicrophoneEnergy())
+ : std::numeric_limits<float>::infinity());
+}
-void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->setGain(gain); }
+void LLWebRTCImpl::setMicGain(float gain)
+{
+ mGain = gain;
+ if (!mTuningMode && mPeerCustomProcessor)
+ {
+ mPeerCustomProcessor->setGain(gain);
+ }
+}
+void LLWebRTCImpl::setMute(bool mute, int delay_ms)
+{
+ if (mMute != mute)
+ {
+ mMute = mute;
+ intSetMute(mute, delay_ms);
+ }
+}
+
+void LLWebRTCImpl::intSetMute(bool mute, int delay_ms)
+{
+ if (mPeerCustomProcessor)
+ {
+ mPeerCustomProcessor->setGain(mMute ? 0.0f : mGain);
+ }
+ if (mMute)
+ {
+ mWorkerThread->PostDelayedTask(
+ [this]
+ {
+ if (mDeviceModule)
+ {
+ mDeviceModule->ForceStopRecording();
+ }
+ },
+ webrtc::TimeDelta::Millis(delay_ms));
+ }
+ else
+ {
+ mWorkerThread->PostTask(
+ [this]
+ {
+ if (mDeviceModule)
+ {
+ mDeviceModule->InitRecording();
+ mDeviceModule->ForceStartRecording();
+ }
+ });
+ }
+}
//
// Peer Connection Helpers
@@ -666,34 +727,31 @@ void LLWebRTCImpl::setPeerConnectionGain(float gain) { mPeerCustomProcessor->set
LLWebRTCPeerConnectionInterface *LLWebRTCImpl::newPeerConnection()
{
- rtc::scoped_refptr peerConnection = rtc::scoped_refptr(new rtc::RefCountedObject());
+ bool empty = mPeerConnections.empty();
+ webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl> peerConnection = webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>(new webrtc::RefCountedObject<LLWebRTCPeerConnectionImpl>());
peerConnection->init(this);
-
- mPeerConnections.emplace_back(peerConnection);
- // Should it really start disabled?
- // Seems like something doesn't get the memo and senders need to be reset later
- // to remove the voice indicator from taskbar
- peerConnection->enableSenderTracks(false);
if (mPeerConnections.empty())
{
- setRecording(true);
- setPlayout(true);
+ intSetMute(mMute);
}
+ mPeerConnections.emplace_back(peerConnection);
+
+ peerConnection->enableSenderTracks(false);
+ peerConnection->resetMute();
return peerConnection.get();
}
void LLWebRTCImpl::freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection)
{
- std::vector>::iterator it =
+ std::vector<webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>>::iterator it =
std::find(mPeerConnections.begin(), mPeerConnections.end(), peer_connection);
if (it != mPeerConnections.end())
{
mPeerConnections.erase(it);
- }
- if (mPeerConnections.empty())
- {
- setRecording(false);
- setPlayout(false);
+ if (mPeerConnections.empty())
+ {
+ intSetMute(true);
+ }
}
}
@@ -729,7 +787,7 @@ void LLWebRTCPeerConnectionImpl::init(LLWebRTCImpl * webrtc_impl)
}
void LLWebRTCPeerConnectionImpl::terminate()
{
- mWebRTCImpl->SignalingBlockingCall(
+ mWebRTCImpl->PostSignalingTask(
[this]()
{
if (mPeerConnection)
@@ -753,7 +811,6 @@ void LLWebRTCPeerConnectionImpl::terminate()
track->set_enabled(false);
}
}
- mPeerConnection->SetAudioRecording(false);
mPeerConnection->Close();
if (mLocalStream)
@@ -840,7 +897,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti
mDataChannel->RegisterObserver(this);
}
- cricket::AudioOptions audioOptions;
+ webrtc::AudioOptions audioOptions;
audioOptions.auto_gain_control = true;
audioOptions.echo_cancellation = true;
audioOptions.noise_suppression = true;
@@ -848,7 +905,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti
mLocalStream = mPeerConnectionFactory->CreateLocalMediaStream("SLStream");
- rtc::scoped_refptr audio_track(
+ webrtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
mPeerConnectionFactory->CreateAudioTrack("SLAudio", mPeerConnectionFactory->CreateAudioSource(audioOptions).get()));
audio_track->set_enabled(false);
mLocalStream->AddTrack(audio_track);
@@ -862,7 +919,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti
webrtc::RtpParameters params;
webrtc::RtpCodecParameters codecparam;
codecparam.name = "opus";
- codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.kind = webrtc::MediaType::AUDIO;
codecparam.clock_rate = 48000;
codecparam.num_channels = 2;
codecparam.parameters["stereo"] = "1";
@@ -877,7 +934,7 @@ bool LLWebRTCPeerConnectionImpl::initializeConnection(const LLWebRTCPeerConnecti
webrtc::RtpParameters params;
webrtc::RtpCodecParameters codecparam;
codecparam.name = "opus";
- codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.kind = webrtc::MediaType::AUDIO;
codecparam.clock_rate = 48000;
codecparam.num_channels = 2;
codecparam.parameters["stereo"] = "1";
@@ -904,7 +961,6 @@ void LLWebRTCPeerConnectionImpl::enableSenderTracks(bool enable)
// set_enabled shouldn't be done on the worker thread.
if (mPeerConnection)
{
- mPeerConnection->SetAudioRecording(enable);
auto senders = mPeerConnection->GetSenders();
for (auto &sender : senders)
{
@@ -938,7 +994,7 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp)
{
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << mPeerConnection->peer_connection_state();
mPeerConnection->SetRemoteDescription(webrtc::CreateSessionDescription(webrtc::SdpType::kAnswer, sdp),
- rtc::scoped_refptr(this));
+ webrtc::scoped_refptr(this));
}
});
}
@@ -951,22 +1007,22 @@ void LLWebRTCPeerConnectionImpl::AnswerAvailable(const std::string &sdp)
void LLWebRTCPeerConnectionImpl::setMute(bool mute)
{
EMicMuteState new_state = mute ? MUTE_MUTED : MUTE_UNMUTED;
- if (new_state == mMute)
- {
- return; // no change
- }
+
+ // even if mute hasn't changed, we still need to update the mute
+ // state on the connections to handle cases where the 'Default' device
+ // has changed in the OS (unplugged headset, etc.) which messes
+ // with the mute state.
+
bool force_reset = mMute == MUTE_INITIAL && mute;
bool enable = !mute;
mMute = new_state;
+
mWebRTCImpl->PostSignalingTask(
[this, force_reset, enable]()
{
if (mPeerConnection)
{
- // SetAudioRecording must be called before enabling/disabling tracks.
- mPeerConnection->SetAudioRecording(enable);
-
auto senders = mPeerConnection->GetSenders();
RTC_LOG(LS_INFO) << __FUNCTION__ << (mMute ? "disabling" : "enabling") << " streams count " << senders.size();
@@ -1046,14 +1102,14 @@ void LLWebRTCPeerConnectionImpl::setSendVolume(float volume)
// PeerConnectionObserver implementation.
//
-void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
- const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams)
+void LLWebRTCPeerConnectionImpl::OnAddTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+ const std::vector<webrtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams)
{
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();
webrtc::RtpParameters params;
webrtc::RtpCodecParameters codecparam;
codecparam.name = "opus";
- codecparam.kind = cricket::MEDIA_TYPE_AUDIO;
+ codecparam.kind = webrtc::MediaType::AUDIO;
codecparam.clock_rate = 48000;
codecparam.num_channels = 2;
codecparam.parameters["stereo"] = "1";
@@ -1062,12 +1118,12 @@ void LLWebRTCPeerConnectionImpl::OnAddTrack(rtc::scoped_refptrSetParameters(params);
}
-void LLWebRTCPeerConnectionImpl::OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver)
+void LLWebRTCPeerConnectionImpl::OnRemoveTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver)
{
RTC_LOG(LS_INFO) << __FUNCTION__ << " " << receiver->id();
}
-void LLWebRTCPeerConnectionImpl::OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel)
+void LLWebRTCPeerConnectionImpl::OnDataChannel(webrtc::scoped_refptr<webrtc::DataChannelInterface> channel)
{
if (mDataChannel)
{
@@ -1154,23 +1210,23 @@ static std::string iceCandidateToTrickleString(const webrtc::IceCandidateInterfa
candidate->candidate().address().ipaddr().ToString() << " " <<
candidate->candidate().address().PortAsString() << " typ ";
- if (candidate->candidate().type() == cricket::LOCAL_PORT_TYPE)
+ if (candidate->candidate().type() == webrtc::IceCandidateType::kHost)
{
candidate_stream << "host";
}
- else if (candidate->candidate().type() == cricket::STUN_PORT_TYPE)
+ else if (candidate->candidate().type() == webrtc::IceCandidateType::kSrflx)
{
candidate_stream << "srflx " <<
"raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
"rport " << candidate->candidate().related_address().PortAsString();
}
- else if (candidate->candidate().type() == cricket::RELAY_PORT_TYPE)
+ else if (candidate->candidate().type() == webrtc::IceCandidateType::kRelay)
{
candidate_stream << "relay " <<
"raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
"rport " << candidate->candidate().related_address().PortAsString();
}
- else if (candidate->candidate().type() == cricket::PRFLX_PORT_TYPE)
+ else if (candidate->candidate().type() == webrtc::IceCandidateType::kPrflx)
{
candidate_stream << "prflx " <<
"raddr " << candidate->candidate().related_address().ipaddr().ToString() << " " <<
@@ -1265,7 +1321,7 @@ void LLWebRTCPeerConnectionImpl::OnSuccess(webrtc::SessionDescriptionInterface *
mPeerConnection->SetLocalDescription(std::unique_ptr<webrtc::SessionDescriptionInterface>(
webrtc::CreateSessionDescription(webrtc::SdpType::kOffer, mangled_sdp)),
- rtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this));
+ webrtc::scoped_refptr<webrtc::SetLocalDescriptionObserverInterface>(this));
}
@@ -1375,7 +1431,7 @@ void LLWebRTCPeerConnectionImpl::sendData(const std::string& data, bool binary)
{
if (mDataChannel)
{
- rtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length());
+ webrtc::CopyOnWriteBuffer cowBuffer(data.data(), data.length());
webrtc::DataBuffer buffer(cowBuffer, binary);
mWebRTCImpl->PostNetworkTask([this, buffer]() {
if (mDataChannel)
diff --git a/indra/llwebrtc/llwebrtc.h b/indra/llwebrtc/llwebrtc.h
index c6fdb909ddc..7d06b7d2b40 100644
--- a/indra/llwebrtc/llwebrtc.h
+++ b/indra/llwebrtc/llwebrtc.h
@@ -159,7 +159,10 @@ class LLWebRTCDeviceInterface
virtual void setTuningMode(bool enable) = 0;
virtual float getTuningAudioLevel() = 0; // for use during tuning
virtual float getPeerConnectionAudioLevel() = 0; // for use when not tuning
- virtual void setPeerConnectionGain(float gain) = 0;
+ virtual void setMicGain(float gain) = 0;
+ virtual void setTuningMicGain(float gain) = 0;
+
+ virtual void setMute(bool mute, int delay_ms = 0) = 0;
};
// LLWebRTCAudioInterface provides the viewer with a way
diff --git a/indra/llwebrtc/llwebrtc_impl.h b/indra/llwebrtc/llwebrtc_impl.h
index b6294dbd4a5..51d42c82b24 100644
--- a/indra/llwebrtc/llwebrtc_impl.h
+++ b/indra/llwebrtc/llwebrtc_impl.h
@@ -54,12 +54,12 @@
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/ssl_adapter.h"
#include "rtc_base/thread.h"
+#include "rtc_base/logging.h"
#include "api/peer_connection_interface.h"
#include "api/media_stream_interface.h"
#include "api/create_peerconnection_factory.h"
#include "modules/audio_device/include/audio_device.h"
#include "modules/audio_device/include/audio_device_data_observer.h"
-#include "rtc_base/task_queue.h"
#include "api/task_queue/task_queue_factory.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "modules/audio_device/include/audio_device_defines.h"
@@ -69,35 +69,30 @@ namespace llwebrtc
class LLWebRTCPeerConnectionImpl;
-class LLWebRTCLogSink : public rtc::LogSink {
+class LLWebRTCLogSink : public webrtc::LogSink
+{
public:
- LLWebRTCLogSink(LLWebRTCLogCallback* callback) :
- mCallback(callback)
- {
- }
+ LLWebRTCLogSink(LLWebRTCLogCallback* callback) : mCallback(callback) {}
// Destructor: close the log file
- ~LLWebRTCLogSink() override
- {
- }
+ ~LLWebRTCLogSink() override {}
- void OnLogMessage(const std::string& msg,
- rtc::LoggingSeverity severity) override
+ void OnLogMessage(const std::string& msg, webrtc::LoggingSeverity severity) override
{
if (mCallback)
{
- switch(severity)
+ switch (severity)
{
- case rtc::LS_VERBOSE:
+ case webrtc::LS_VERBOSE:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_INFO:
+ case webrtc::LS_INFO:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_WARNING:
+ case webrtc::LS_WARNING:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
- case rtc::LS_ERROR:
+ case webrtc::LS_ERROR:
mCallback->LogMessage(LLWebRTCLogCallback::LOG_LEVEL_VERBOSE, msg);
break;
default:
@@ -118,73 +113,307 @@ class LLWebRTCLogSink : public rtc::LogSink {
LLWebRTCLogCallback* mCallback;
};
-// Implements a class allowing capture of audio data
-// to determine audio level of the microphone.
-class LLAudioDeviceObserver : public webrtc::AudioDeviceDataObserver
+// -----------------------------------------------------------------------------
+// A proxy transport that forwards capture data to two AudioTransport sinks:
+// - the "engine" (libwebrtc's VoiceEngine)
+// - the "user" (your app's listener)
+//
+// Playout (NeedMorePlayData) goes only to the engine by default to avoid
+// double-writing into the output buffer. See notes below if you want a tap.
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioTransport : public webrtc::AudioTransport
{
- public:
- LLAudioDeviceObserver();
-
- // Retrieve the RMS audio loudness
- float getMicrophoneEnergy();
-
- // Data retrieved from the caputure device is
- // passed in here for processing.
- void OnCaptureData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec) override;
-
- // This is for data destined for the render device.
- // not currently used.
- void OnRenderData(const void *audio_samples,
- const size_t num_samples,
- const size_t bytes_per_sample,
- const size_t num_channels,
- const uint32_t samples_per_sec) override;
+public:
+ LLWebRTCAudioTransport();
+
+ void SetEngineTransport(webrtc::AudioTransport* t);
+
+ // -------- Capture path: fan out to both sinks --------
+ int32_t RecordedDataIsAvailable(const void* audio_data,
+ size_t number_of_samples,
+ size_t bytes_per_sample,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ uint32_t total_delay_ms,
+ int32_t clock_drift,
+ uint32_t current_mic_level,
+ bool key_pressed,
+ uint32_t& new_mic_level) override;
+
+ // -------- Playout path: delegate to engine only --------
+ int32_t NeedMorePlayData(size_t number_of_samples,
+ size_t bytes_per_sample,
+ size_t number_of_channels,
+ uint32_t samples_per_sec,
+ void* audio_data,
+ size_t& number_of_samples_out,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+
+ // Method to pull mixed render audio data from all active VoE channels.
+ // The data will not be passed as reference for audio processing internally.
+ void PullRenderData(int bits_per_sample,
+ int sample_rate,
+ size_t number_of_channels,
+ size_t number_of_frames,
+ void* audio_data,
+ int64_t* elapsed_time_ms,
+ int64_t* ntp_time_ms) override;
+
+ float GetMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+ void SetGain(float gain) { mGain.store(gain, std::memory_order_relaxed); }
+
+private:
+ std::atomic<webrtc::AudioTransport*> engine_{ nullptr };
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
+ float mSumVector[NUM_PACKETS_TO_FILTER];
+ std::atomic<float> mMicrophoneEnergy;
+ std::atomic<float> mGain{ 0.0f };
- protected:
- static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing (30 frames)
- float mSumVector[NUM_PACKETS_TO_FILTER];
- float mMicrophoneEnergy;
};
+
+// -----------------------------------------------------------------------------
+// LLWebRTCAudioDeviceModule
+// - Wraps a real ADM to provide microphone energy for tuning
+// -----------------------------------------------------------------------------
+class LLWebRTCAudioDeviceModule : public webrtc::AudioDeviceModule
+{
+public:
+ explicit LLWebRTCAudioDeviceModule(webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner) : inner_(std::move(inner)), tuning_(false)
+ {
+ RTC_CHECK(inner_);
+ }
+
+ // ----- AudioDeviceModule interface: we mostly forward to |inner_| -----
+ int32_t ActiveAudioLayer(AudioLayer* audioLayer) const override { return inner_->ActiveAudioLayer(audioLayer); }
+
+ int32_t RegisterAudioCallback(webrtc::AudioTransport* engine_transport) override
+ {
+ // The engine registers its transport here. We put our audio transport between engine and ADM.
+ audio_transport_.SetEngineTransport(engine_transport);
+ // Register our proxy with the real ADM.
+ return inner_->RegisterAudioCallback(&audio_transport_);
+ }
+
+ int32_t Init() override { return inner_->Init(); }
+ int32_t Terminate() override { return inner_->Terminate(); }
+ bool Initialized() const override { return inner_->Initialized(); }
+
+ // --- Device enumeration/selection (forward) ---
+ int16_t PlayoutDevices() override { return inner_->PlayoutDevices(); }
+ int16_t RecordingDevices() override { return inner_->RecordingDevices(); }
+ int32_t PlayoutDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override
+ {
+ return inner_->PlayoutDeviceName(index, name, guid);
+ }
+ int32_t RecordingDeviceName(uint16_t index, char name[webrtc::kAdmMaxDeviceNameSize], char guid[webrtc::kAdmMaxGuidSize]) override
+ {
+ return inner_->RecordingDeviceName(index, name, guid);
+ }
+ int32_t SetPlayoutDevice(uint16_t index) override { return inner_->SetPlayoutDevice(index); }
+ int32_t SetRecordingDevice(uint16_t index) override { return inner_->SetRecordingDevice(index); }
+
+ // Windows default/communications selectors, if your branch exposes them:
+ int32_t SetPlayoutDevice(WindowsDeviceType type) override { return inner_->SetPlayoutDevice(type); }
+ int32_t SetRecordingDevice(WindowsDeviceType type) override { return inner_->SetRecordingDevice(type); }
+
+ // --- Init/start/stop (forward) ---
+ int32_t InitPlayout() override { return inner_->InitPlayout(); }
+ bool PlayoutIsInitialized() const override { return inner_->PlayoutIsInitialized(); }
+ int32_t StartPlayout() override {
+ if (tuning_) return 0; // For tuning, don't allow playout
+ return inner_->StartPlayout();
+ }
+ int32_t StopPlayout() override { return inner_->StopPlayout(); }
+ bool Playing() const override { return inner_->Playing(); }
+
+ int32_t InitRecording() override { return inner_->InitRecording(); }
+ bool RecordingIsInitialized() const override { return inner_->RecordingIsInitialized(); }
+ int32_t StartRecording() override {
+ // ignore start recording as webrtc.lib will
+ // send one when streams first connect, resulting
+ // in an inadvertent 'recording' when mute is on.
+ // We take full control of StartRecording via
+ // ForceStartRecording below.
+ return 0;
+ }
+ int32_t StopRecording() override {
+ if (tuning_) return 0; // if we're tuning, disregard the StopRecording we get from disabling the streams
+ return inner_->StopRecording();
+ }
+ int32_t ForceStartRecording() { return inner_->StartRecording(); }
+ int32_t ForceStopRecording() { return inner_->StopRecording(); }
+ bool Recording() const override { return inner_->Recording(); }
+
+ // --- Stereo opts (forward if available on your branch) ---
+ int32_t SetStereoPlayout(bool enable) override { return inner_->SetStereoPlayout(enable); }
+ int32_t SetStereoRecording(bool enable) override { return inner_->SetStereoRecording(enable); }
+ int32_t PlayoutIsAvailable(bool* available) override { return inner_->PlayoutIsAvailable(available); }
+ int32_t RecordingIsAvailable(bool* available) override { return inner_->RecordingIsAvailable(available); }
+
+ // --- AGC/Volume/Mute/etc. (forward) ---
+ int32_t SetMicrophoneVolume(uint32_t volume) override { return inner_->SetMicrophoneVolume(volume); }
+ int32_t MicrophoneVolume(uint32_t* volume) const override { return inner_->MicrophoneVolume(volume); }
+
+ // --- Speaker/Microphone init (forward) ---
+ int32_t InitSpeaker() override { return inner_->InitSpeaker(); }
+ bool SpeakerIsInitialized() const override { return inner_->SpeakerIsInitialized(); }
+ int32_t InitMicrophone() override { return inner_->InitMicrophone(); }
+ bool MicrophoneIsInitialized() const override { return inner_->MicrophoneIsInitialized(); }
+
+ // --- Speaker Volume (forward) ---
+ int32_t SpeakerVolumeIsAvailable(bool* available) override { return inner_->SpeakerVolumeIsAvailable(available); }
+ int32_t SetSpeakerVolume(uint32_t volume) override { return inner_->SetSpeakerVolume(volume); }
+ int32_t SpeakerVolume(uint32_t* volume) const override { return inner_->SpeakerVolume(volume); }
+ int32_t MaxSpeakerVolume(uint32_t* maxVolume) const override { return inner_->MaxSpeakerVolume(maxVolume); }
+ int32_t MinSpeakerVolume(uint32_t* minVolume) const override { return inner_->MinSpeakerVolume(minVolume); }
+
+ // --- Microphone Volume (forward) ---
+ int32_t MicrophoneVolumeIsAvailable(bool* available) override { return inner_->MicrophoneVolumeIsAvailable(available); }
+ int32_t MaxMicrophoneVolume(uint32_t* maxVolume) const override { return inner_->MaxMicrophoneVolume(maxVolume); }
+ int32_t MinMicrophoneVolume(uint32_t* minVolume) const override { return inner_->MinMicrophoneVolume(minVolume); }
+
+ // --- Speaker Mute (forward) ---
+ int32_t SpeakerMuteIsAvailable(bool* available) override { return inner_->SpeakerMuteIsAvailable(available); }
+ int32_t SetSpeakerMute(bool enable) override { return inner_->SetSpeakerMute(enable); }
+ int32_t SpeakerMute(bool* enabled) const override { return inner_->SpeakerMute(enabled); }
+
+ // --- Microphone Mute (forward) ---
+ int32_t MicrophoneMuteIsAvailable(bool* available) override { return inner_->MicrophoneMuteIsAvailable(available); }
+ int32_t SetMicrophoneMute(bool enable) override { return inner_->SetMicrophoneMute(enable); }
+ int32_t MicrophoneMute(bool* enabled) const override { return inner_->MicrophoneMute(enabled); }
+
+ // --- Stereo Support (forward) ---
+ int32_t StereoPlayoutIsAvailable(bool* available) const override { return inner_->StereoPlayoutIsAvailable(available); }
+ int32_t StereoPlayout(bool* enabled) const override { return inner_->StereoPlayout(enabled); }
+ int32_t StereoRecordingIsAvailable(bool* available) const override { return inner_->StereoRecordingIsAvailable(available); }
+ int32_t StereoRecording(bool* enabled) const override { return inner_->StereoRecording(enabled); }
+
+ // --- Delay/Timing (forward) ---
+ int32_t PlayoutDelay(uint16_t* delayMS) const override { return inner_->PlayoutDelay(delayMS); }
+
+ // --- Built-in Audio Processing (forward) ---
+ bool BuiltInAECIsAvailable() const override { return inner_->BuiltInAECIsAvailable(); }
+ bool BuiltInAGCIsAvailable() const override { return inner_->BuiltInAGCIsAvailable(); }
+ bool BuiltInNSIsAvailable() const override { return inner_->BuiltInNSIsAvailable(); }
+ int32_t EnableBuiltInAEC(bool enable) override { return inner_->EnableBuiltInAEC(enable); }
+ int32_t EnableBuiltInAGC(bool enable) override { return inner_->EnableBuiltInAGC(enable); }
+ int32_t EnableBuiltInNS(bool enable) override { return inner_->EnableBuiltInNS(enable); }
+
+ // --- Additional AudioDeviceModule methods (forward) ---
+ int32_t GetPlayoutUnderrunCount() const override { return inner_->GetPlayoutUnderrunCount(); }
+
+ // Used to generate RTC stats. If not implemented, RTCAudioPlayoutStats will
+ // not be present in the stats.
+ std::optional<webrtc::AudioDeviceModule::Stats> GetStats() const override { return inner_->GetStats(); }
+
+// Only supported on iOS.
+#if defined(WEBRTC_IOS)
+ virtual int GetPlayoutAudioParameters(AudioParameters* params) const override { return inner_->GetPlayoutAudioParameters(params); }
+ virtual int GetRecordAudioParameters(AudioParameters* params) override { return inner_->GetRecordAudioParameters(params); }
+#endif // WEBRTC_IOS
+
+ virtual int32_t GetPlayoutDevice() const override { return inner_->GetPlayoutDevice(); }
+ virtual int32_t GetRecordingDevice() const override { return inner_->GetRecordingDevice(); }
+ virtual int32_t SetObserver(webrtc::AudioDeviceObserver* observer) override { return inner_->SetObserver(observer); }
+
+ // tuning microphone energy calculations
+ float GetMicrophoneEnergy() { return audio_transport_.GetMicrophoneEnergy(); }
+ void SetTuningMicGain(float gain) { audio_transport_.SetGain(gain); }
+ void SetTuning(bool tuning, bool mute)
+ {
+ tuning_ = tuning;
+ if (tuning)
+ {
+ inner_->InitRecording();
+ inner_->StartRecording();
+ inner_->StopPlayout();
+ }
+ else
+ {
+ if (mute)
+ {
+ inner_->StopRecording();
+ }
+ else
+ {
+ inner_->InitRecording();
+ inner_->StartRecording();
+ }
+ inner_->StartPlayout();
+ }
+ }
+
+protected:
+ ~LLWebRTCAudioDeviceModule() override = default;
+
+private:
+ webrtc::scoped_refptr<webrtc::AudioDeviceModule> inner_;
+ LLWebRTCAudioTransport audio_transport_;
+
+ bool tuning_;
+};
+
+class LLCustomProcessorState
+{
+
+public:
+ float getMicrophoneEnergy() { return mMicrophoneEnergy.load(std::memory_order_relaxed); }
+ void setMicrophoneEnergy(float energy) { mMicrophoneEnergy.store(energy, std::memory_order_relaxed); }
+
+ void setGain(float gain)
+ {
+ mGain.store(gain, std::memory_order_relaxed);
+ mDirty.store(true, std::memory_order_relaxed);
+ }
+
+ float getGain() { return mGain.load(std::memory_order_relaxed); }
+
+ bool getDirty() { return mDirty.exchange(false, std::memory_order_relaxed); }
+
+ protected:
+ std::atomic<bool> mDirty{ true };
+ std::atomic<float> mMicrophoneEnergy{ 0.0f };
+ std::atomic<float> mGain{ 0.0f };
+};
+
+using LLCustomProcessorStatePtr = std::shared_ptr<LLCustomProcessorState>;
+
// Used to process/retrieve audio levels after
// all of the processing (AGC, AEC, etc.) for display in-world to the user.
class LLCustomProcessor : public webrtc::CustomProcessing
{
- public:
- LLCustomProcessor();
+public:
+ LLCustomProcessor(LLCustomProcessorStatePtr state);
~LLCustomProcessor() override {}
// (Re-) Initializes the submodule.
void Initialize(int sample_rate_hz, int num_channels) override;
// Analyzes the given capture or render signal.
- void Process(webrtc::AudioBuffer *audio) override;
+ void Process(webrtc::AudioBuffer* audio) override;
// Returns a string representation of the module state.
std::string ToString() const override { return ""; }
- float getMicrophoneEnergy() { return mMicrophoneEnergy; }
-
- void setGain(float gain) { mGain = gain; }
-
- protected:
- static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
- int mSampleRateHz;
- int mNumChannels;
+protected:
+ static const int NUM_PACKETS_TO_FILTER = 30; // 300 ms of smoothing
+ int mSampleRateHz{ 48000 };
+ int mNumChannels{ 2 };
+ int mRampFrames{ 2 };
+ float mCurrentGain{ 0.0f };
+ float mGainStep{ 0.0f };
float mSumVector[NUM_PACKETS_TO_FILTER];
- float mMicrophoneEnergy;
- float mGain;
+ friend LLCustomProcessorState;
+ LLCustomProcessorStatePtr mState;
};
// Primary singleton implementation for interfacing
// with the native webrtc library.
-class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceSink
+class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceObserver
{
public:
LLWebRTCImpl(LLWebRTCLogCallback* logCallback);
@@ -214,10 +443,15 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
float getTuningAudioLevel() override;
float getPeerConnectionAudioLevel() override;
- void setPeerConnectionGain(float gain) override;
+ void setMicGain(float gain) override;
+ void setTuningMicGain(float gain) override;
+
+ void setMute(bool mute, int delay_ms = 20) override;
+
+ void intSetMute(bool mute, int delay_ms = 20);
//
- // AudioDeviceSink
+ // AudioDeviceObserver
//
void OnDevicesUpdated() override;
@@ -246,19 +480,19 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
mNetworkThread->PostTask(std::move(task), location);
}
- void WorkerBlockingCall(rtc::FunctionView<void()> functor,
+ void WorkerBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mWorkerThread->BlockingCall(std::move(functor), location);
}
- void SignalingBlockingCall(rtc::FunctionView<void()> functor,
+ void SignalingBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mSignalingThread->BlockingCall(std::move(functor), location);
}
- void NetworkBlockingCall(rtc::FunctionView<void()> functor,
+ void NetworkBlockingCall(webrtc::FunctionView<void()> functor,
const webrtc::Location& location = webrtc::Location::Current())
{
mNetworkThread->BlockingCall(std::move(functor), location);
@@ -266,7 +500,7 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
// Allows the LLWebRTCPeerConnectionImpl class to retrieve the
// native webrtc PeerConnectionFactory.
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> getPeerConnectionFactory()
{
return mPeerConnectionFactory;
}
@@ -275,49 +509,47 @@ class LLWebRTCImpl : public LLWebRTCDeviceInterface, public webrtc::AudioDeviceS
LLWebRTCPeerConnectionInterface* newPeerConnection();
void freePeerConnection(LLWebRTCPeerConnectionInterface* peer_connection);
- // enables/disables capture via the capture device
- void setRecording(bool recording);
-
- void setPlayout(bool playing);
-
protected:
+
+ void workerDeployDevices();
LLWebRTCLogSink* mLogSink;
// The native webrtc threads
- std::unique_ptr<rtc::Thread> mNetworkThread;
- std::unique_ptr<rtc::Thread> mWorkerThread;
- std::unique_ptr<rtc::Thread> mSignalingThread;
+ std::unique_ptr<webrtc::Thread> mNetworkThread;
+ std::unique_ptr<webrtc::Thread> mWorkerThread;
+ std::unique_ptr<webrtc::Thread> mSignalingThread;
// The factory that allows creation of native webrtc PeerConnections.
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
- rtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule;
+ webrtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule;
// more native webrtc stuff
- std::unique_ptr mTaskQueueFactory;
+ std::unique_ptr mTaskQueueFactory;
// Devices
void updateDevices();
- rtc::scoped_refptr<webrtc::AudioDeviceModule> mTuningDeviceModule;
- rtc::scoped_refptr<webrtc::AudioDeviceModule> mPeerDeviceModule;
+ void deployDevices();
+ std::atomic<bool> mDevicesDeploying;
+ webrtc::scoped_refptr<LLWebRTCAudioDeviceModule> mDeviceModule;
std::vector mVoiceDevicesObserverList;
// accessors in native webrtc for devices aren't apparently implemented yet.
bool mTuningMode;
- int32_t mRecordingDevice;
+ std::string mRecordingDevice;
LLWebRTCVoiceDeviceList mRecordingDeviceList;
- int32_t mPlayoutDevice;
+ std::string mPlayoutDevice;
LLWebRTCVoiceDeviceList mPlayoutDeviceList;
bool mMute;
+ float mGain;
- LLAudioDeviceObserver * mTuningAudioDeviceObserver;
- LLCustomProcessor * mPeerCustomProcessor;
+ LLCustomProcessorStatePtr mPeerCustomProcessor;
// peer connections
- std::vector<rtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
+ std::vector<webrtc::scoped_refptr<LLWebRTCPeerConnectionImpl>> mPeerConnections;
};
@@ -342,7 +574,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
void terminate();
virtual void AddRef() const override = 0;
- virtual rtc::RefCountReleaseStatus Release() const override = 0;
+ virtual webrtc::RefCountReleaseStatus Release() const override = 0;
//
// LLWebRTCPeerConnection
@@ -373,10 +605,10 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
//
void OnSignalingChange(webrtc::PeerConnectionInterface::SignalingState new_state) override {}
- void OnAddTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
- const std::vector<rtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
- void OnRemoveTrack(rtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
- void OnDataChannel(rtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
+ void OnAddTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver,
+ const std::vector<webrtc::scoped_refptr<webrtc::MediaStreamInterface>> &streams) override;
+ void OnRemoveTrack(webrtc::scoped_refptr<webrtc::RtpReceiverInterface> receiver) override;
+ void OnDataChannel(webrtc::scoped_refptr<webrtc::DataChannelInterface> channel) override;
void OnRenegotiationNeeded() override {}
void OnIceConnectionChange(webrtc::PeerConnectionInterface::IceConnectionState new_state) override {};
void OnIceGatheringChange(webrtc::PeerConnectionInterface::IceGatheringState new_state) override;
@@ -415,7 +647,7 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
LLWebRTCImpl * mWebRTCImpl;
- rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
+ webrtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> mPeerConnectionFactory;
typedef enum {
MUTE_INITIAL,
@@ -429,12 +661,12 @@ class LLWebRTCPeerConnectionImpl : public LLWebRTCPeerConnectionInterface,
std::vector<std::unique_ptr<webrtc::IceCandidateInterface>> mCachedIceCandidates;
bool mAnswerReceived;
- rtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
- rtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
+ webrtc::scoped_refptr<webrtc::PeerConnectionInterface> mPeerConnection;
+ webrtc::scoped_refptr<webrtc::MediaStreamInterface> mLocalStream;
// data
std::vector mDataObserverList;
- rtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
+ webrtc::scoped_refptr<webrtc::DataChannelInterface> mDataChannel;
};
}
diff --git a/indra/newview/llvoicewebrtc.cpp b/indra/newview/llvoicewebrtc.cpp
index 34f3e22182f..b26a48fd5f8 100644
--- a/indra/newview/llvoicewebrtc.cpp
+++ b/indra/newview/llvoicewebrtc.cpp
@@ -82,9 +82,15 @@ const std::string WEBRTC_VOICE_SERVER_TYPE = "webrtc";
namespace {
- const F32 MAX_AUDIO_DIST = 50.0f;
- const F32 VOLUME_SCALE_WEBRTC = 0.01f;
- const F32 LEVEL_SCALE_WEBRTC = 0.008f;
+ const F32 MAX_AUDIO_DIST = 50.0f;
+ const F32 VOLUME_SCALE_WEBRTC = 0.01f;
+ const F32 TUNING_LEVEL_SCALE = 0.01f;
+ const F32 TUNING_LEVEL_START_POINT = 0.8f;
+ const F32 LEVEL_SCALE = 0.005f;
+ const F32 LEVEL_START_POINT = 0.18f;
+ const uint32_t SET_HIDDEN_RESTORE_DELAY_MS = 200; // 200 ms to unmute again after hiding during teleport
+ const uint32_t MUTE_FADE_DELAY_MS = 500; // 20ms fade followed by 480ms silence gets rid of the click just after unmuting.
+ // This is because the buffers and processing is cleared by the silence.
const F32 SPEAKING_AUDIO_LEVEL = 0.30;
@@ -201,7 +207,6 @@ bool LLWebRTCVoiceClient::sShuttingDown = false;
LLWebRTCVoiceClient::LLWebRTCVoiceClient() :
mHidden(false),
- mTuningMode(false),
mTuningMicGain(0.0),
mTuningSpeakerVolume(50), // Set to 50 so the user can hear themselves when he sets his mic volume
mDevicesListUpdated(false),
@@ -348,25 +353,45 @@ void LLWebRTCVoiceClient::updateSettings()
static LLCachedControl<std::string> sOutputDevice(gSavedSettings, "VoiceOutputAudioDevice");
setRenderDevice(sOutputDevice);
- LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice()) << LL_ENDL;
+ LL_INFOS("Voice") << "Input device: " << std::quoted(sInputDevice()) << ", output device: " << std::quoted(sOutputDevice())
+ << LL_ENDL;
static LLCachedControl<F32> sMicLevel(gSavedSettings, "AudioLevelMic");
setMicGain(sMicLevel);
llwebrtc::LLWebRTCDeviceInterface::AudioConfig config;
+ bool audioConfigChanged = false;
+
static LLCachedControl<bool> sEchoCancellation(gSavedSettings, "VoiceEchoCancellation", true);
- config.mEchoCancellation = sEchoCancellation;
+ if (sEchoCancellation != config.mEchoCancellation)
+ {
+ config.mEchoCancellation = sEchoCancellation;
+ audioConfigChanged = true;
+ }
static LLCachedControl<bool> sAGC(gSavedSettings, "VoiceAutomaticGainControl", true);
- config.mAGC = sAGC;
+ if (sAGC != config.mAGC)
+ {
+ config.mAGC = sAGC;
+ audioConfigChanged = true;
+ }
- static LLCachedControl<U32> sNoiseSuppressionLevel(gSavedSettings,
+ static LLCachedControl<U32> sNoiseSuppressionLevel(
+ gSavedSettings,
"VoiceNoiseSuppressionLevel",
llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel::NOISE_SUPPRESSION_LEVEL_VERY_HIGH);
- config.mNoiseSuppressionLevel = (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel;
-
- mWebRTCDeviceInterface->setAudioConfig(config);
+ auto noiseSuppressionLevel =
+ (llwebrtc::LLWebRTCDeviceInterface::AudioConfig::ENoiseSuppressionLevel)(U32)sNoiseSuppressionLevel;
+ if (noiseSuppressionLevel != config.mNoiseSuppressionLevel)
+ {
+ config.mNoiseSuppressionLevel = noiseSuppressionLevel;
+ audioConfigChanged = true;
+ }
+ if (audioConfigChanged)
+ {
+ mWebRTCDeviceInterface->setAudioConfig(config);
+ }
}
}
@@ -695,21 +720,38 @@ void LLWebRTCVoiceClient::OnDevicesChangedImpl(const llwebrtc::LLWebRTCVoiceDevi
std::string outputDevice = gSavedSettings.getString("VoiceOutputAudioDevice");
LL_DEBUGS("Voice") << "Setting devices to-input: '" << inputDevice << "' output: '" << outputDevice << "'" << LL_ENDL;
- clearRenderDevices();
- for (auto &device : render_devices)
+
+ // only set the render device if the device list has changed.
+ if (mRenderDevices.size() != render_devices.size() || !std::equal(mRenderDevices.begin(),
+ mRenderDevices.end(),
+ render_devices.begin(),
+ [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b) {
+ return a.display_name == b.mDisplayName && a.full_name == b.mID; }))
{
- addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ clearRenderDevices();
+ for (auto& device : render_devices)
+ {
+ addRenderDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ }
+ setRenderDevice(outputDevice);
}
- setRenderDevice(outputDevice);
- clearCaptureDevices();
- for (auto &device : capture_devices)
+ // only set the capture device if the device list has changed.
+ if (mCaptureDevices.size() != capture_devices.size() || !std::equal(mCaptureDevices.begin(),
+ mCaptureDevices.end(),
+ capture_devices.begin(),
+ [](const LLVoiceDevice& a, const llwebrtc::LLWebRTCVoiceDevice& b)
+ { return a.display_name == b.mDisplayName && a.full_name == b.mID; }))
{
- LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL;
+ clearCaptureDevices();
+ for (auto& device : capture_devices)
+ {
+ LL_DEBUGS("Voice") << "Checking capture device:'" << device.mID << "'" << LL_ENDL;
- addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ addCaptureDevice(LLVoiceDevice(device.mDisplayName, device.mID));
+ }
+ setCaptureDevice(inputDevice);
}
- setCaptureDevice(inputDevice);
setDevicesListUpdated(true);
}
@@ -762,7 +804,14 @@ bool LLWebRTCVoiceClient::inTuningMode()
void LLWebRTCVoiceClient::tuningSetMicVolume(float volume)
{
- mTuningMicGain = volume;
+ if (volume != mTuningMicGain)
+ {
+ mTuningMicGain = volume;
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setTuningMicGain(volume);
+ }
+ }
}
void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume)
@@ -774,21 +823,10 @@ void LLWebRTCVoiceClient::tuningSetSpeakerVolume(float volume)
}
}
-float LLWebRTCVoiceClient::getAudioLevel()
-{
- if (mIsInTuningMode)
- {
- return (1.0f - mWebRTCDeviceInterface->getTuningAudioLevel() * LEVEL_SCALE_WEBRTC) * mTuningMicGain / 2.1f;
- }
- else
- {
- return (1.0f - mWebRTCDeviceInterface->getPeerConnectionAudioLevel() * LEVEL_SCALE_WEBRTC) * mMicGain / 2.1f;
- }
-}
-
float LLWebRTCVoiceClient::tuningGetEnergy(void)
{
- return getAudioLevel();
+ float rms = mWebRTCDeviceInterface->getTuningAudioLevel();
+ return TUNING_LEVEL_START_POINT - TUNING_LEVEL_SCALE * rms;
}
bool LLWebRTCVoiceClient::deviceSettingsAvailable()
@@ -824,6 +862,11 @@ void LLWebRTCVoiceClient::setHidden(bool hidden)
if (inSpatialChannel())
{
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMute(mHidden || mMuteMic,
+ mHidden ? 0 : SET_HIDDEN_RESTORE_DELAY_MS); // delay 200ms so as to not pile up mutes/unmutes.
+ }
if (mHidden)
{
// get out of the channel entirely
@@ -990,7 +1033,6 @@ void LLWebRTCVoiceClient::updatePosition(void)
{
if (participant->mRegion != region->getRegionID()) {
participant->mRegion = region->getRegionID();
- setMuteMic(mMuteMic);
}
}
}
@@ -1115,13 +1157,14 @@ void LLWebRTCVoiceClient::sendPositionUpdate(bool force)
// Update our own volume on our participant, so it'll show up
// in the UI. This is done on all sessions, so switching
// sessions retains consistent volume levels.
-void LLWebRTCVoiceClient::updateOwnVolume() {
- F32 audio_level = 0.0;
- if (!mMuteMic && !mTuningMode)
+void LLWebRTCVoiceClient::updateOwnVolume()
+{
+ F32 audio_level = 0.0f;
+ if (!mMuteMic)
{
- audio_level = getAudioLevel();
+ float rms = mWebRTCDeviceInterface->getPeerConnectionAudioLevel();
+ audio_level = LEVEL_START_POINT - LEVEL_SCALE * rms;
}
-
sessionState::for_each(boost::bind(predUpdateOwnVolume, _1, audio_level));
}
@@ -1518,6 +1561,17 @@ void LLWebRTCVoiceClient::setMuteMic(bool muted)
}
mMuteMic = muted;
+
+ if (mIsInTuningMode)
+ {
+ return;
+ }
+
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMute(muted, muted ? MUTE_FADE_DELAY_MS : 0); // delay for 40ms on mute to allow buffers to empty
+ }
+
// when you're hidden, your mic is always muted.
if (!mHidden)
{
@@ -1556,7 +1610,10 @@ void LLWebRTCVoiceClient::setMicGain(F32 gain)
if (gain != mMicGain)
{
mMicGain = gain;
- mWebRTCDeviceInterface->setPeerConnectionGain(gain);
+ if (mWebRTCDeviceInterface)
+ {
+ mWebRTCDeviceInterface->setMicGain(gain);
+ }
}
}
diff --git a/indra/newview/llvoicewebrtc.h b/indra/newview/llvoicewebrtc.h
index 71347f206a5..722d81fdc2b 100644
--- a/indra/newview/llvoicewebrtc.h
+++ b/indra/newview/llvoicewebrtc.h
@@ -444,10 +444,6 @@ class LLWebRTCVoiceClient : public LLSingleton,
private:
- // helper function to retrieve the audio level
- // Used in multiple places.
- float getAudioLevel();
-
// Coroutine support methods
//---
void voiceConnectionCoro();
@@ -458,7 +454,6 @@ class LLWebRTCVoiceClient : public LLSingleton,
LL::WorkQueue::weak_t mMainQueue;
- bool mTuningMode;
F32 mTuningMicGain;
int mTuningSpeakerVolume;
bool mDevicesListUpdated; // set to true when the device list has been updated