Implement AudioDeviceModule::GetStats API from WebRTC.
Should only be landed once
https://webrtc-review.googlesource.com/c/src/+/291040
is landed.

Bug: webrtc:14653
Change-Id: I31471064e8b0f88ad0904a400641586bf2382822
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/4154699
Reviewed-by: Henrik Boström <hbos@chromium.org>
Reviewed-by: Jeremy Roman <jbroman@chromium.org>
Commit-Queue: Fredrik Hernqvist <fhernqvist@google.com>
Cr-Commit-Position: refs/heads/main@{#1096068}
Fredrik Hernqvist authored and Chromium LUCI CQ committed Jan 24, 2023
1 parent 907555d commit f639a71
Showing 12 changed files with 257 additions and 14 deletions.
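For orientation, the sketch below shows the webrtc::AudioDeviceModule::Stats struct that this change fills in. The field names and types are inferred from the designated initializer in WebRtcAudioDeviceImpl::GetStats() further down; the authoritative definition lands with the WebRTC CL referenced in the commit message, so treat this as an illustration rather than the real header.

// Sketch only; in WebRTC this is nested inside webrtc::AudioDeviceModule.
// See https://webrtc-review.googlesource.com/c/src/+/291040 for the actual
// definition. Field names match the initializer in GetStats() below.
struct Stats {
  // Playout that had to be synthesized (e.g. silence) because of glitches.
  double synthesized_samples_duration_s = 0.0;
  uint64_t synthesized_samples_events = 0;
  // Everything handed to the output sink, plus the summed per-sample delay.
  double total_samples_duration_s = 0.0;
  double total_playout_delay_s = 0.0;
  uint64_t total_samples_count = 0;
};

These values map one-to-one onto the RTCAudioPlayoutStats members allowlisted in the first file below.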
15 changes: 15 additions & 0 deletions chrome/test/data/webrtc/peerconnection_getstats.js
@@ -147,6 +147,7 @@ let kRTCInboundRtpStreamStats = new RTCStats(kRTCReceivedRtpStreamStats, {
estimatedPlayoutTimestamp: 'number',
fractionLost: 'number', // Obsolete, moved to RTCRemoteInboundRtpStreamStats.
decoderImplementation: 'string',
playoutId: 'string',
powerEfficientDecoder: 'boolean',
framesAssembledFromMultiplePackets: 'number',
totalAssemblyTime: 'number',
@@ -494,6 +495,20 @@ let kRTCCertificateStats = new RTCStats(null, {
});
addRTCStatsToAllowlist(Presence.MANDATORY, 'certificate', kRTCCertificateStats);

/*
* RTCAudioPlayoutStats
* https://w3c.github.io/webrtc-stats/#playoutstats-dict*
* @private
*/
let kRTCAudioPlayoutStats = new RTCStats(null, {
synthesizedSamplesDuration: 'number',
synthesizedSamplesEvents: 'number',
totalSamplesDuration: 'number',
totalPlayoutDelay: 'number',
totalSamplesCount: 'number',
});
addRTCStatsToAllowlist(Presence.OPTIONAL, 'audio-playout', kRTCAudioPlayoutStats);

// Public interface to tests. These are expected to be called with
// ExecuteJavascript invocations from the browser tests and will return answers
// through the DOM automation controller.
1 change: 1 addition & 0 deletions third_party/blink/renderer/modules/BUILD.gn
@@ -612,6 +612,7 @@ source_set("unit_tests") {
"webaudio/stereo_panner_node_test.cc",
"webdatabase/dom_window_web_database_test.cc",
"webdatabase/quota_tracker_test.cc",
"webrtc/webrtc_audio_device_impl_test.cc",
"webshare/navigator_share_test.cc",
"websockets/dom_websocket_test.cc",
"websockets/mock_websocket_channel.cc",
@@ -11,10 +11,13 @@
#include "base/cfi_buildflags.h"
#include "base/functional/bind.h"
#include "base/run_loop.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "media/audio/audio_sink_parameters.h"
#include "media/audio/audio_source_parameters.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_capturer_source.h"
#include "media/base/audio_glitch_info.h"
#include "media/base/mock_audio_renderer_sink.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -36,6 +39,7 @@
#include "third_party/webrtc/api/media_stream_interface.h"

using testing::_;
using testing::AnyNumber;
using testing::DoAll;
using testing::InvokeWithoutArgs;
using testing::Return;
@@ -50,16 +54,22 @@ const int kHardwareBufferSize = 512;
const char kDefaultOutputDeviceId[] = "";
const char kOtherOutputDeviceId[] = "other-output-device";
const char kInvalidOutputDeviceId[] = "invalid-device";
const media::AudioParameters kAudioParameters(
media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::ChannelLayoutConfig::Stereo(),
kHardwareSampleRate,
kHardwareBufferSize);

class MockAudioRendererSource : public blink::WebRtcAudioRendererSource {
public:
MockAudioRendererSource() {}
~MockAudioRendererSource() override {}
MOCK_METHOD4(RenderData,
MockAudioRendererSource() = default;
~MockAudioRendererSource() override = default;
MOCK_METHOD5(RenderData,
void(media::AudioBus* audio_bus,
int sample_rate,
base::TimeDelta audio_delay,
base::TimeDelta* current_time));
base::TimeDelta* current_time,
const media::AudioGlitchInfo& glitch_info));
MOCK_METHOD1(RemoveAudioRenderer, void(blink::WebRtcAudioRenderer* renderer));
MOCK_METHOD0(AudioRendererThreadStopped, void());
MOCK_METHOD1(SetOutputDeviceForAec, void(const String&));
@@ -88,9 +98,7 @@ class AudioDeviceFactoryTestingPlatformSupport : public blink::Platform {
params.device_id == kInvalidOutputDeviceId
? media::OUTPUT_DEVICE_STATUS_ERROR_INTERNAL
: media::OUTPUT_DEVICE_STATUS_OK,
media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::ChannelLayoutConfig::Stereo(),
kHardwareSampleRate, kHardwareBufferSize));
kAudioParameters);

if (params.device_id != kInvalidOutputDeviceId) {
EXPECT_CALL(*mock_sink_.get(), Start());
@@ -202,6 +210,10 @@ class WebRtcAudioRendererTest : public testing::Test {
return audio_device_factory_platform_->mock_sink();
}

media::AudioRendererSink::RenderCallback* render_callback() {
return mock_sink()->callback();
}

void TearDown() override {
base::RunLoop().RunUntilIdle();
renderer_proxy_ = nullptr;
@@ -296,6 +308,31 @@ TEST_F(WebRtcAudioRendererTest, DISABLED_VerifySinkParameters) {
renderer_proxy_->Stop();
}

TEST_F(WebRtcAudioRendererTest, Render) {
SetupRenderer(kDefaultOutputDeviceId);
EXPECT_EQ(kDefaultOutputDeviceId,
mock_sink()->GetOutputDeviceInfo().device_id());
renderer_proxy_->Start();

auto dest = media::AudioBus::Create(kAudioParameters);
media::AudioGlitchInfo glitch_info{};
auto audio_delay = base::Seconds(1);

EXPECT_CALL(*mock_sink(), CurrentThreadIsRenderingThread())
.WillRepeatedly(Return(true));
// We cannot place any specific expectations on the calls to RenderData,
// because they vary depending on whether or not the fifo is used, which in
// turn varies depending on the platform.
EXPECT_CALL(*source_, RenderData(_, kAudioParameters.sample_rate(), _, _, _))
.Times(AnyNumber());
render_callback()->Render(audio_delay, base::TimeTicks(), glitch_info,
dest.get());

EXPECT_CALL(*mock_sink(), Stop());
EXPECT_CALL(*source_.get(), RemoveAudioRenderer(renderer_.get()));
renderer_proxy_->Stop();
}

TEST_F(WebRtcAudioRendererTest, NonDefaultDevice) {
SetupRenderer(kDefaultOutputDeviceId);
EXPECT_EQ(kDefaultOutputDeviceId,
1 change: 1 addition & 0 deletions third_party/blink/renderer/modules/webrtc/DEPS
@@ -5,6 +5,7 @@ include_rules = [
"+media/base/audio_bus.h",
"+media/base/audio_capturer_source.h",
"+media/base/audio_decoder.h",
"+media/base/audio_glitch_info.h",
"+media/base/audio_power_monitor.h",
"+media/base/audio_latency.h",
"+media/base/audio_parameters.h",
@@ -11,6 +11,7 @@
#include "base/trace_event/trace_event.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_parameters.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/sample_rates.h"
#include "third_party/blink/public/platform/modules/webrtc/webrtc_logging.h"
#include "third_party/blink/renderer/modules/mediastream/processed_local_audio_source.h"
@@ -50,14 +51,24 @@ WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl() {
DCHECK(!initialized_) << "Terminate must have been called.";
}

void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus,
int sample_rate,
base::TimeDelta audio_delay,
base::TimeDelta* current_time) {
void WebRtcAudioDeviceImpl::RenderData(
media::AudioBus* audio_bus,
int sample_rate,
base::TimeDelta audio_delay,
base::TimeDelta* current_time,
const media::AudioGlitchInfo& glitch_info) {
TRACE_EVENT2("audio", "WebRtcAudioDeviceImpl::RenderData", "sample_rate",
sample_rate, "audio_delay_ms", audio_delay.InMilliseconds());
{
base::AutoLock auto_lock(lock_);
cumulative_glitch_info_ += glitch_info;
total_samples_count_ += audio_bus->frames();
// |total_playout_delay_| refers to the sum of playout delays for all
// samples, so we add the delay multiplied by the number of samples. See
// https://w3c.github.io/webrtc-stats/#dom-rtcaudioplayoutstats-totalplayoutdelay
total_playout_delay_ += audio_delay * audio_bus->frames();
total_samples_duration_ += media::AudioTimestampHelper::FramesToTime(
audio_bus->frames(), sample_rate);
#if DCHECK_IS_ON()
DCHECK(!renderer_ || renderer_->CurrentThreadIsRenderingThread());
if (!audio_renderer_thread_checker_.CalledOnValidThread()) {
@@ -387,6 +398,19 @@ void WebRtcAudioDeviceImpl::RemovePlayoutSink(
playout_sinks_.remove(sink);
}

absl::optional<webrtc::AudioDeviceModule::Stats>
WebRtcAudioDeviceImpl::GetStats() const {
base::AutoLock auto_lock(lock_);
return absl::optional<webrtc::AudioDeviceModule::Stats>(
webrtc::AudioDeviceModule::Stats{
.synthesized_samples_duration_s =
cumulative_glitch_info_.duration.InSecondsF(),
.synthesized_samples_events = cumulative_glitch_info_.count,
.total_samples_duration_s = total_samples_duration_.InSecondsF(),
.total_playout_delay_s = total_playout_delay_.InSecondsF(),
.total_samples_count = total_samples_count_});
}

base::UnguessableToken
WebRtcAudioDeviceImpl::GetAuthorizedDeviceSessionIdForAudioRenderer() {
DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
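The accumulation performed in RenderData() above is easy to check by hand. Below is a standalone sketch of the same arithmetic, using plain doubles instead of base::TimeDelta and hypothetical buffer parameters (480 frames at 48 kHz with a 20 ms delay):

#include <cstdint>
#include <cstdio>

int main() {
  const int sample_rate = 48000;
  const int frames = 480;             // one 10 ms buffer (hypothetical)
  const double audio_delay_s = 0.02;  // 20 ms sink delay (hypothetical)

  uint64_t total_samples_count = 0;
  double total_samples_duration_s = 0.0;
  double total_playout_delay_s = 0.0;

  // What a single RenderData() call adds:
  total_samples_count += frames;
  // AudioTimestampHelper::FramesToTime(frames, rate) is frames / rate seconds.
  total_samples_duration_s += static_cast<double>(frames) / sample_rate;
  // totalPlayoutDelay sums the delay once per sample, hence delay * frames.
  total_playout_delay_s += audio_delay_s * frames;

  std::printf("samples=%llu duration=%.3f s delay_sum=%.1f s\n",
              static_cast<unsigned long long>(total_samples_count),
              total_samples_duration_s, total_playout_delay_s);
  // Prints: samples=480 duration=0.010 s delay_sum=9.6 s
  return 0;
}

Because the delay is added once per sample, total_playout_delay grows far faster than wall-clock time; dividing it by total_samples_count recovers the average playout delay, which is how the spec suggests the value be consumed.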
@@ -15,6 +15,7 @@
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "base/unguessable_token.h"
#include "media/base/audio_glitch_info.h"
#include "third_party/blink/renderer/modules/modules_export.h"
#include "third_party/blink/renderer/modules/webrtc/webrtc_audio_device_not_impl.h"
#include "third_party/blink/renderer/platform/webrtc/webrtc_source.h"
@@ -120,7 +121,8 @@ class MODULES_EXPORT WebRtcAudioDeviceImpl
void RenderData(media::AudioBus* audio_bus,
int sample_rate,
base::TimeDelta audio_delay,
base::TimeDelta* current_time) override;
base::TimeDelta* current_time,
const media::AudioGlitchInfo& glitch_info) override;

// Called on the main render thread.
void RemoveAudioRenderer(blink::WebRtcAudioRenderer* renderer) override;
@@ -131,6 +133,8 @@
void AddPlayoutSink(blink::WebRtcPlayoutDataSource::Sink* sink) override;
void RemovePlayoutSink(blink::WebRtcPlayoutDataSource::Sink* sink) override;

absl::optional<webrtc::AudioDeviceModule::Stats> GetStats() const override;

private:
using CapturerList = std::list<ProcessedLocalAudioSource*>;
using PlayoutDataSinkList = std::list<blink::WebRtcPlayoutDataSource::Sink*>;
@@ -180,6 +184,13 @@

// The output device used for echo cancellation
String output_device_id_for_aec_;

// Corresponds to RTCAudioPlayoutStats as defined in
// https://w3c.github.io/webrtc-stats/#playoutstats-dict*
media::AudioGlitchInfo cumulative_glitch_info_ GUARDED_BY(lock_);
base::TimeDelta total_samples_duration_ GUARDED_BY(lock_);
base::TimeDelta total_playout_delay_ GUARDED_BY(lock_);
uint64_t total_samples_count_ GUARDED_BY(lock_) = 0;
};

} // namespace blink
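For completeness, a minimal consumer sketch for the new GetStats() accessor declared above. Nothing in this commit adds such a call site; the helper name and the include path are assumptions for illustration only.

#include <cstdio>

// Include path as used from a Chromium checkout; adjust for plain WebRTC.
#include "third_party/webrtc/modules/audio_device/include/audio_device.h"

// Hypothetical helper, not part of this change.
void LogPlayoutStats(webrtc::AudioDeviceModule* adm) {
  const absl::optional<webrtc::AudioDeviceModule::Stats> stats =
      adm->GetStats();
  if (!stats)
    return;  // Modules that do not implement GetStats() report nothing.
  std::printf(
      "synthesized: %.3f s over %llu events; total: %.3f s, %llu samples\n",
      stats->synthesized_samples_duration_s,
      static_cast<unsigned long long>(stats->synthesized_samples_events),
      stats->total_samples_duration_s,
      static_cast<unsigned long long>(stats->total_samples_count));
}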
@@ -0,0 +1,124 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "third_party/blink/renderer/modules/webrtc/webrtc_audio_device_impl.h"

#include <memory>

#include "base/time/time.h"
#include "media/base/audio_bus.h"
#include "media/base/audio_glitch_info.h"
#include "media/base/audio_parameters.h"
#include "media/base/audio_timestamp_helper.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/renderer/platform/webrtc/webrtc_source.h"
#include "third_party/webrtc/rtc_base/ref_counted_object.h"

namespace blink {

namespace {

class MockAudioTransport : public webrtc::AudioTransport {
public:
MockAudioTransport() = default;

MockAudioTransport(const MockAudioTransport&) = delete;
MockAudioTransport& operator=(const MockAudioTransport&) = delete;

MOCK_METHOD10(RecordedDataIsAvailable,
int32_t(const void* audioSamples,
size_t nSamples,
size_t nBytesPerSample,
size_t nChannels,
uint32_t samplesPerSec,
uint32_t totalDelayMS,
int32_t clockDrift,
uint32_t currentMicLevel,
bool keyPressed,
uint32_t& newMicLevel));

MOCK_METHOD8(NeedMorePlayData,
int32_t(size_t nSamples,
size_t nBytesPerSample,
size_t nChannels,
uint32_t samplesPerSec,
void* audioSamples,
size_t& nSamplesOut,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));

MOCK_METHOD7(PullRenderData,
void(int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames,
void* audio_data,
int64_t* elapsed_time_ms,
int64_t* ntp_time_ms));
};

const int kHardwareSampleRate = 44100;
const int kHardwareBufferSize = 512;

const media::AudioParameters kAudioParameters =
media::AudioParameters(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
media::ChannelLayoutConfig::Stereo(),
kHardwareSampleRate,
kHardwareBufferSize);

} // namespace

class WebRtcAudioDeviceImplTest : public testing::Test {
public:
WebRtcAudioDeviceImplTest()
: audio_device_(
new rtc::RefCountedObject<blink::WebRtcAudioDeviceImpl>()),
audio_transport_(new MockAudioTransport()) {
audio_device_module()->Init();
audio_device_module()->RegisterAudioCallback(audio_transport_.get());
}

~WebRtcAudioDeviceImplTest() override { audio_device_module()->Terminate(); }

protected:
webrtc::AudioDeviceModule* audio_device_module() {
return static_cast<webrtc::AudioDeviceModule*>(audio_device_.get());
}

scoped_refptr<blink::WebRtcAudioDeviceImpl> audio_device_;
std::unique_ptr<MockAudioTransport> audio_transport_;
};

// Verify that stats are accumulated during calls to RenderData and are
// available through GetStats().
TEST_F(WebRtcAudioDeviceImplTest, GetStats) {
auto audio_bus = media::AudioBus::Create(kAudioParameters);
int sample_rate = kAudioParameters.sample_rate();
auto audio_delay = base::Seconds(1);
base::TimeDelta current_time;
media::AudioGlitchInfo glitch_info;
glitch_info.duration = base::Seconds(2);
glitch_info.count = 3;

for (int i = 0; i < 10; i++) {
webrtc::AudioDeviceModule::Stats stats = *audio_device_->GetStats();
EXPECT_EQ(stats.synthesized_samples_duration_s,
(base::Seconds(2) * i).InSecondsF());
EXPECT_EQ(stats.synthesized_samples_events, 3ull * i);
EXPECT_EQ(stats.total_samples_count,
static_cast<uint64_t>(audio_bus->frames() * i));
EXPECT_EQ(stats.total_playout_delay_s,
(audio_bus->frames() * i * base::Seconds(1)).InSecondsF());
EXPECT_EQ(stats.total_samples_duration_s,
(media::AudioTimestampHelper::FramesToTime(audio_bus->frames(),
sample_rate) *
i)
.InSecondsF());
audio_device_->RenderData(audio_bus.get(), sample_rate, audio_delay,
&current_time, glitch_info);
}
}

} // namespace blink
@@ -618,6 +618,7 @@ int WebRtcAudioRenderer::Render(base::TimeDelta delay,
return 0;

audio_delay_ = delay;
glitch_info_accumulator_.Add(glitch_info);

// Pull the data we will deliver.
if (audio_fifo_)
@@ -673,7 +674,7 @@ void WebRtcAudioRenderer::SourceCallback(int fifo_frame_delay,
// We need to keep render data for the |source_| regardless of |state_|,
// otherwise the data will be buffered up inside |source_|.
source_->RenderData(audio_bus, sink_params_.sample_rate(), output_delay,
&current_time_);
&current_time_, glitch_info_accumulator_.GetAndReset());

// Avoid filling up the audio bus if we are not playing; instead
// return here and ensure that the returned value in Render() is 0.
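The renderer-side change above follows an accumulate-then-hand-off pattern: Render(), called for every sink callback, only records the incoming media::AudioGlitchInfo, and SourceCallback() drains the accumulator when it actually forwards audio to RenderData(). The standalone sketch below illustrates that pattern with simplified stand-in types; glitch_info_accumulator_ itself is presumably a media::AudioGlitchInfo::Accumulator, which is not shown in this diff.

#include <cstdint>

// Stand-ins for media::AudioGlitchInfo and its accumulator; the real types
// live in media/base/audio_glitch_info.h.
struct GlitchInfo {
  double duration_s = 0.0;
  uint64_t count = 0;
};

class GlitchInfoAccumulator {
 public:
  // Called from Render() for every sink callback.
  void Add(const GlitchInfo& info) {
    pending_.duration_s += info.duration_s;
    pending_.count += info.count;
  }

  // Called when the data is actually pulled (SourceCallback). Returning and
  // clearing in one step ensures each glitch is reported exactly once, even
  // when the FIFO makes the two call rates differ.
  GlitchInfo GetAndReset() {
    GlitchInfo out = pending_;
    pending_ = {};
    return out;
  }

 private:
  GlitchInfo pending_;
};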
